# (Dataset-export artifact removed; the module begins with the docstring below.)
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
# pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_consumers(graph):
  """Sanity check on the consumer list of the tensors.

  Tallies, for every tensor appearing as an op input in `graph`, how many
  times it is consumed, and verifies the tally agrees with the tensor's own
  `consumers()` list.

  Args:
    graph: The graph to check (anything exposing `get_operations()`, whose
      ops expose `inputs`, and whose tensors expose `consumers()`).

  Returns:
    True if every consumed tensor's recorded consumer count matches the
    number of op inputs referencing it, False otherwise.
  """
  # Idiomatic tally via Counter instead of the hand-rolled dict.get loop.
  consumer_count = collections.Counter(
      v for op in graph.get_operations() for v in op.inputs)
  # NOTE: tensors never used as an input are not checked — same behavior
  # as the original early-return loop.
  return all(
      len(tensor.consumers()) == count
      for tensor, count in consumer_count.items())
def all_fetchables():
  """Returns the names of all fetchable tensors in the default graph."""
  graph = ops.get_default_graph()
  return [
      out.name
      for operation in graph.get_operations()
      for out in operation.outputs
      if graph.is_fetchable(out)
  ]
def all_feedables():
  """Returns all feedable input tensors in the default graph."""
  graph = ops.get_default_graph()
  return [
      inp
      for operation in graph.get_operations()
      for inp in operation.inputs
      if graph.is_feedable(inp)
  ]
def opt_cfg():
  """Returns a ConfigProto with L1 optimization, inlining, and folding on."""
  optimizer_options = config_pb2.OptimizerOptions(
      opt_level=config_pb2.OptimizerOptions.L1,
      do_function_inlining=True,
      do_constant_folding=True)
  graph_options = config_pb2.GraphOptions(optimizer_options=optimizer_options)
  return config_pb2.ConfigProto(
      allow_soft_placement=True, graph_options=graph_options)
def isum(s, maximum_iterations=None):
  """Adds the loop counter (0 through 9) onto `s` via a tf while_loop.

  Args:
    s: Tensor to accumulate into.
    maximum_iterations: Optional cap forwarded to `while_loop`.

  Returns:
    The accumulated tensor (s + 0 + 1 + ... up to the iteration limit).
  """
  counter = constant_op.constant(0, name="i")
  cond = lambda i, total: math_ops.less(i, 10)
  body = lambda i, total: [math_ops.add(i, 1), math_ops.add(i, total)]
  _, result = control_flow_ops.while_loop(
      cond, body, [counter, s], maximum_iterations=maximum_iterations)
  return result
@test_util.with_c_api
class ControlFlowTest(test.TestCase):
def testRefIdentity(self):
with self.test_session():
v = variables.Variable(7)
v = control_flow_ops._Identity(v)
op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(isinstance(v2, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.test_session():
v = variables.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
variables.global_variables_initializer().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.test_session():
v = variables.Variable(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testEnterShapePropagation(self):
with self.test_session():
v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
# If is_constant=True, the shape information should be propagated.
enter_v_constant = control_flow_ops.enter(v, "frame1", is_constant=True)
self.assertEqual(enter_v_constant.shape, [2])
# Otherwise, the shape should be unknown.
enter_v_non_constant = control_flow_ops.enter(v, "frame2",
is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None)
def testSwitchMergeIndexedSlices(self):
with self.test_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def testSwitchDeadBranch(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.test_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = control_flow_ops.enter(false, "foo_1", False)
enter_n = control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.test_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testLoop_2(self):
with self.test_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testDifferentFrame(self):
with self.test_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = control_flow_ops.enter(data, "foo_1", False)
enter_2 = control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
def testCondBool(self):
values = constant_op.constant(10)
fn1 = lambda: math_ops.add(values, 1)
fn2 = lambda: math_ops.subtract(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = control_flow_ops.cond(False, fn1, fn2)
def testCondInt(self):
p = array_ops.placeholder(dtypes.bool, shape=[])
v = constant_op.constant(10)
fn1 = lambda: math_ops.add(v, 1)
fn2 = lambda: math_ops.subtract(v, 1)
y = control_flow_ops.cond(p, fn1, fn2)
grad = gradients_impl.gradients(y, [v])
self.assertAllEqual([None], grad)
def testFetchable(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_fetchable(t.op):
sess.run(t, feed_dict={x: 3})
else:
with self.assertRaisesRegexp(ValueError,
"has been marked as not fetchable"):
sess.run(t, feed_dict={x: 3})
def testFeedable(self):
with self.test_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
def testCondIndexedSlices(self):
with self.test_session():
values = constant_op.constant(10)
indices = constant_op.constant(0)
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondSparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values.eval())
self.assertAllEqual([[1], [4]], r.indices.eval())
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondResource(self):
with self.test_session():
rv = resource_variable_ops.ResourceVariable(True)
variables.global_variables_initializer().run()
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(1.0, control_flow_ops.cond(rv, case, lambda: t).eval())
def testCondIndexedSlicesDifferentTypes(self):
with self.test_session():
values = constant_op.constant(10)
i_32 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def testCondColocation(self):
with self.test_session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = r.eval()
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.test_session():
x = constant_op.constant(10)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = r.eval()
self.assertAllEqual(9, result)
def testCond_3(self):
with self.test_session():
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = r.eval()
self.assertAllEqual(12, result)
def testCond_4(self):
with self.test_session():
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.test_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
def testCond_6(self):
with self.test_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.test_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondRef(self):
with self.test_session():
x = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="x",
container="",
shared_name="")
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], r.eval())
def testCondWithControl(self):
with self.test_session() as sess:
control_holder = array_ops.placeholder(dtypes.float32, shape=())
a = constant_op.constant(3)
def true_branch():
with ops.control_dependencies([control_holder]):
_ = a + 1
return a + 2
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
self.assertEqual(5, r.eval())
def testUninitializedRefIdentity(self):
with self.test_session() as sess:
v = gen_state_ops._variable(
shape=[1],
dtype=dtypes.float32,
name="v",
container="",
shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
# of the reference in the 'true' branch in the 'tf.identity' op will
# not 'fire' when v is uninitialized, so this is a valid construction.
# This test tests that _ref_identity allows uninitialized ref as input
# so that this construction is allowed.
v_f_op = gen_array_ops._ref_identity(v_f)
v_t_op = gen_array_ops._ref_identity(v_t)
with ops.control_dependencies([v_f_op]):
assign_v = state_ops.assign(v, [1.0])
with ops.control_dependencies([v_t_op]):
orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], sess.run(merged_op.output))
def testCondSwitchIdentity(self):
# Make sure the recv identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondRecvIdentity(self):
# Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
with ops.device("/cpu:0"):
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondGrad_1(self):
with self.test_session():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
result = grad.eval()
self.assertAllEqual(1.0, result)
def testCondGrad_2(self):
with self.test_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testNestedCond_Simple(self):
with self.test_session():
x = constant_op.constant(0., name="X")
y = control_flow_ops.cond(
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
def testCondGrad_Gather(self):
with self.test_session() as sess:
v1 = variables.Variable([1.0, 42.0])
c = array_ops.placeholder(dtypes.int32, shape=[])
pred = math_ops.less(c, 2)
fn1 = lambda: array_ops.identity(v1)
fn2 = lambda: array_ops.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [v1])[0]
variables.global_variables_initializer().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
with self.test_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithRefs_1(self):
with self.test_session() as sess:
x = variables.Variable(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return (i + 1, gen_array_ops._ref_identity(x))
r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = sess.run(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.test_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
def testWhileWithMaximumIterations(self):
with self.test_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1+3, 2+3, 3+3, 4+3, 5+3], r.eval())
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.test_session():
r = control_flow_ops.while_loop(
lambda i: i < 3,
lambda i: i + 1,
[0],
maximum_iterations=1)
self.assertEqual(1, r.eval())
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
def testWhile_3(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
d = ops.convert_to_tensor(100)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
compute, [i, m, c, o])
result = r[3].eval()
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
compute, [i, m, c, o])
result = r[3].eval()
self.assertAllEqual(42, result)
def testWhile_5(self):
with self.test_session():
def compute(i, c, o):
c = array_ops.strided_slice(x,
array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
i = ops.convert_to_tensor(0)
c = ops.convert_to_tensor([0])
o = ops.convert_to_tensor([0])
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(
lambda i, c, o: math_ops.less(i, s), compute, [i, c, o], [
i.get_shape(), tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()
])
result = r[2].eval()
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def testBufferForwarding(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.test_session() as sess:
with ops.device("/cpu:0"):
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
self.assertEqual(1000, r_val)
self.assertTrue(run_metadata.HasField("step_stats"))
unique_allocs = set()
for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
for output in node_stat.output:
unique_allocs.add(
output.tensor_description.allocation_description.ptr)
# Prior to cl/147536680, the number of unique allocations was about 1005.
self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_2(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def testWhileShape(self):
with self.test_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def _b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, _b, [i, m], [i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
def testWhileWithNonTensorInput_Scalar(self):
with self.test_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithNonTensorInput_Vector(self):
with self.test_session():
n = np.array([0]) # Note, [0] would not work here; that is a list
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], r.eval())
def testWhileShapeInference(self):
with self.test_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, b, [i, m], [i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertTrue(r[1].get_shape()[0].value is None)
self.assertEqual(r[1].get_shape()[1], tensor_shape.Dimension(2))
with self.assertRaisesRegexp(
ValueError,
r"The shape for while_1/Merge_1:0 is not an invariant for the loop. "
r"It enters the loop with shape \(2, 2\), but has shape \(4, 2\) "
r"after one iteration. Provide shape invariants using either the "
r"`shape_invariants` argument of tf.while_loop or set_shape\(\) on "
r"the loop variables."):
r = control_flow_ops.while_loop(c, b, [i, m])
def testWhileShapeInferenceSparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1, sparse_tensor.SparseTensor(x.indices, x.values * 2.0,
x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 1)
_, r = control_flow_ops.while_loop(
c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([None])])
self.assertTrue(r.dense_shape.get_shape()[0].value is None)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([5])])
def testWhileShapeInferenceIndexedSlices(self):
with self.test_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10, 2], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1, ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
_, r = control_flow_ops.while_loop(
c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertTrue(r.values.get_shape()[0].value is None)
self.assertEqual(r.values.get_shape()[1].value, 2)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x], [i.get_shape(), tensor_shape.TensorShape([None, 5])])
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(0)
def cpu_sum(s):
c = lambda i, s: math_ops.less(i, 10)
def b(i, s):
i1 = math_ops.add(i, 1)
with ops.device("/cpu:0"):
s1 = math_ops.add(i, s)
return i1, s1
_, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
self.assertEqual(225, r.eval())
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def _testNestedWhile_2(self, use_gpu):
# Test the cases that A -> Enter and Exit -> A are partitioned.
with self.test_session(use_gpu=use_gpu):
s0 = constant_op.constant(2.0)
def inner_loop(s):
c = lambda s: math_ops.less(s, 20.0)
def b(s):
s1 = math_ops.add(s, s)
return s1
r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
outer_c = lambda x: math_ops.less(x, 3000.0)
def outer_b(x):
x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
with ops.device("/cpu:0"):
x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
self.assertEqual(1048576.0, r.eval())
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
def testWhileWithControl_1(self):
with self.test_session():
n = constant_op.constant(0)
r = constant_op.constant(0)
condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [n_, r_]
res = control_flow_ops.while_loop(
condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
  """Same as testWhileWithControl_1 but with a single loop variable."""
  with self.test_session():
    r = constant_op.constant(0)
    condition = lambda r_: math_ops.less(r_, 10)

    def body(r_):
      with r_.graph.control_dependencies([r_]):
        r_ = constant_op.constant(12)
      return [r_]

    res = control_flow_ops.while_loop(
        condition, body, [r], parallel_iterations=1)
    self.assertAllEqual(12, res.eval())
def testWhileWithControl_3(self):
  """A while loop built inside an outer control_dependencies scope."""
  with self.test_session() as sess:
    b = array_ops.placeholder(dtypes.bool)
    c = constant_op.constant(1)
    x0 = constant_op.constant(0)
    with ops.control_dependencies([b]):
      r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
    self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_4(self):
  """Like testWhileWithControl_3 but the body captures c via an Identity."""
  with self.test_session() as sess:
    b = array_ops.placeholder(dtypes.bool)
    c = constant_op.constant(1)
    x0 = constant_op.constant(0)
    with ops.control_dependencies([b]):
      r = control_flow_ops.while_loop(
          lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
    self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_5(self):
  """Control dependency on an outer placeholder applied inside the body."""
  with self.test_session() as sess:
    b = array_ops.placeholder(dtypes.bool)
    c = constant_op.constant(1)
    x0 = constant_op.constant(0)

    def body(x):
      # The dependency on b is declared inside the loop body itself.
      with ops.control_dependencies([b]):
        return x + c

    r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
    self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
  """Cond nested in a while loop under an outer control dependency.

  Ensures that no control edges from an outer control dependency context
  are added to nodes inside cond/while contexts.
  """
  with self.test_session() as sess:
    const_true = lambda: constant_op.constant(True)
    const_false = lambda: constant_op.constant(False)
    cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
    body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
    with ops.control_dependencies([control_flow_ops.no_op()]):
      loop = control_flow_ops.while_loop(cond, body,
                                         (constant_op.constant(5),))
    self.assertEqual(0, sess.run(loop))
def testWhileCondWithControl_1(self):
  """Variable assignment via cond inside a loop under control deps.

  The loop body squares v through a cond branch each iteration; after four
  iterations v == 2**16.
  """
  with self.test_session():
    v = variable_scope.get_variable(
        "v", [], initializer=init_ops.constant_initializer(2))
    i0 = constant_op.constant(0)
    with ops.control_dependencies([i0]):

      def loop_condition(i):
        return i < 4

      def loop_body(i):
        some_cond = control_flow_ops.cond(
            constant_op.constant(True),
            lambda: state_ops.assign(v, math_ops.square(v)),
            lambda: v)
        # Force the assignment to happen before the counter increment.
        with ops.control_dependencies([some_cond]):
          return i + 1

      r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
    variables.global_variables_initializer().run()
    self.assertEqual(4, r.eval())
    self.assertAllClose(65536.0, v.eval())
def testWhileCondExitControl(self):
  """Control edge from a while loop's Exit node to a node in a cond branch."""
  with self.test_session():
    v = variables.Variable(1)

    def false_branch():
      cond = lambda i: i < 100

      def body(i):
        x = state_ops.assign(v, i)
        return x + 1

      loop = control_flow_ops.while_loop(cond, body, [0])
      # Make sure to handle correctly control edge from Exit to a node.
      with ops.control_dependencies([loop]):
        return constant_op.constant(6.0)

    r = control_flow_ops.cond(
        constant_op.constant(False), lambda: constant_op.constant(1.0),
        false_branch)
    variables.global_variables_initializer().run()
    self.assertEqual(6.0, r.eval())
    self.assertEqual(99, v.eval())
def testCondWhile_1(self):
  """While loop in the taken (true) branch of a cond."""
  with self.test_session():
    n = ops.convert_to_tensor(0, name="n")
    c = lambda x: math_ops.less(x, 10)
    b = lambda x: math_ops.add(x, 1)
    r = control_flow_ops.cond(
        math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
        lambda: n)
    self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
  """While loop in the taken (false) branch of a cond."""
  with self.test_session():
    n = ops.convert_to_tensor(0)
    c = lambda x: math_ops.less(x, 10)
    b = lambda x: math_ops.add(x, 1)
    r = control_flow_ops.cond(
        math_ops.less(1, 0), lambda: math_ops.add(n, 1),
        lambda: control_flow_ops.while_loop(c, b, [n]))
    self.assertAllEqual(10, r.eval())
def _testCondWhile_3(self, use_gpu):
  """Gradient through a cond that selects a while loop or a multiply.

  The loop body places its add on CPU to exercise cross-device execution.
  """
  with self.test_session(use_gpu=use_gpu) as sess:
    p = array_ops.placeholder(dtypes.bool)
    n = constant_op.constant(0.0)

    def c(x):
      return math_ops.less(x, 10.0)

    def b(x):
      with ops.device("/cpu:0"):
        x1 = math_ops.add(x, 1.0)
      return x1

    r = control_flow_ops.cond(p,
                              lambda: control_flow_ops.while_loop(c, b, [n]),
                              lambda: math_ops.multiply(n, 2.0))
    r1 = gradients_impl.gradients(r, [n])
    self.assertEqual(10, sess.run(r, {p: True}))
    self.assertEqual([1.0], sess.run(r1, {p: True}))
    self.assertEqual(0.0, sess.run(r, {p: False}))
    self.assertEqual([2.0], sess.run(r1, {p: False}))
def testCondWhile_3(self):
  """Runs the cond-while gradient test without and with GPU."""
  for use_gpu in (False, True):
    self._testCondWhile_3(use_gpu=use_gpu)
def testWhileCond_1(self):
  """Cond inside a while loop body; true branch always taken."""
  with self.test_session():
    i = ops.convert_to_tensor(0, name="i")
    n = ops.convert_to_tensor(10, name="n")
    one = ops.convert_to_tensor(1, name="one")
    c = lambda x: math_ops.less(x, n)
    # pylint: disable=undefined-variable
    # for OSS build
    b = lambda x: control_flow_ops.cond(
        constant_op.constant(True),
        lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
    # pylint: enable=undefined-variable
    r = control_flow_ops.while_loop(c, b, [i])
    self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
  """Cond inside a loop body where the untaken branch captures an outer tensor."""
  with self.test_session():
    n = ops.convert_to_tensor(0, name="n")
    c = lambda x: math_ops.less(x, 10)
    b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
    r = control_flow_ops.while_loop(c, b, [n])
    self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
  """Cond inside a loop body with a computed (non-constant) predicate."""
  with self.test_session():
    n = ops.convert_to_tensor(0)
    c = lambda x: math_ops.less(x, 10)
    # pylint: disable=undefined-variable
    # for OSS build
    b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
                                        lambda: math_ops.add(x, 1),
                                        lambda: math_ops.subtract(x, 1))
    # pylint: enable=undefined-variable
    r = control_flow_ops.while_loop(c, b, [n])
    self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
  """scatter_update of a variable inside a loop body via with_dependencies."""
  with self.test_session():
    select = variables.Variable([3.0, 4.0, 5.0])
    n = constant_op.constant(0)

    def loop_iterator(j):
      return math_ops.less(j, 3)

    def loop_body(j):
      ns = state_ops.scatter_update(select, j, 10.0)
      nj = math_ops.add(j, 1)
      # Ensure the scatter_update runs before the counter is produced.
      op = control_flow_ops.group(ns)
      nj = control_flow_ops.with_dependencies([op], nj)
      return [nj]

    r = control_flow_ops.while_loop(
        loop_iterator, loop_body, [n], parallel_iterations=1)
    variables.global_variables_initializer().run()
    self.assertEqual(3, r.eval())
    result = select.eval()
    self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
  """Two variables updated per loop iteration, grouped into one dep."""
  with self.test_session():
    select1 = variables.Variable([3.0, 4.0, 5.0])
    select2 = variables.Variable([3.0, 4.0, 5.0])
    n = constant_op.constant(0)

    def loop_iterator(j):
      return math_ops.less(j, 3)

    def loop_body(j):
      ns1 = state_ops.scatter_update(select1, j, 10.0)
      ns2 = state_ops.scatter_update(select2, j, 10.0)
      nj = math_ops.add(j, 1)
      op = control_flow_ops.group(ns1, ns2)
      nj = control_flow_ops.with_dependencies([op], nj)
      return [nj]

    r = control_flow_ops.while_loop(
        loop_iterator, loop_body, [n], parallel_iterations=1)
    variables.global_variables_initializer().run()
    self.assertEqual(3, r.eval())
    result1 = select1.eval()
    self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
    result2 = select2.eval()
    self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
def testWhileUpdateVariable_3(self):
  """scatter_update whose result is threaded through as a loop variable."""
  with self.test_session():
    select = variables.Variable([3.0, 4.0, 5.0])
    n = constant_op.constant(0)

    def loop_iterator(j, _):
      return math_ops.less(j, 3)

    def loop_body(j, _):
      ns = state_ops.scatter_update(select, j, 10.0)
      nj = math_ops.add(j, 1)
      return [nj, ns]

    r = control_flow_ops.while_loop(
        loop_iterator,
        loop_body, [n, array_ops.identity(select)],
        parallel_iterations=1)
    variables.global_variables_initializer().run()
    result = r[1].eval()
    self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
# b/24814703
def testWhileUpdateVariable_4(self):
  """Loop body reads an assign op (asn1) created outside the loop."""
  with self.test_session():
    var_a = variables.Variable(0, name="a")
    var_b = variables.Variable(0, name="b")
    variables.global_variables_initializer().run()
    c = constant_op.constant(0, name="c")
    # Created outside the loop; referenced from inside the body.
    asn1 = state_ops.assign_add(var_a, 1, name="a_add")

    # Loop condition
    def pred(i):
      return math_ops.less(i, 10)

    # Loop body
    def loop_body(i):
      asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
      with ops.control_dependencies([asn2]):
        ni = math_ops.add(i, 1, name="i_add")
      return ni

    lpa = control_flow_ops.while_loop(
        pred, loop_body, [c], parallel_iterations=1)
    self.assertEqual(0, var_b.eval())
    lpa.eval()  # Run the loop
    self.assertEqual(10, var_b.eval())
# b/24736492
def testWhileUpdateVariable_5(self):
  """Loop condition reads a variable that the body mutates."""
  with self.test_session():
    # Create some variables.
    var_a = variables.Variable(0, name="a")
    var_b = variables.Variable(0, name="b")
    variables.global_variables_initializer().run()

    # Change condition to check var_b
    def pred(_):
      return math_ops.less(var_b, 10)

    # Change body to increment var_b
    def loop_body(i):
      asn1 = state_ops.assign_add(
          var_a, constant_op.constant(1), name="a_add")
      asn2 = state_ops.assign_add(
          var_b, constant_op.constant(1), name="b_add")
      with ops.control_dependencies([asn1, asn2]):
        inc_b = array_ops.identity(var_b)
      return inc_b

    lpa = control_flow_ops.while_loop(
        pred, loop_body, [var_b], parallel_iterations=1, name="loop")
    self.assertEqual(0, var_b.eval())
    lpa.eval()  # Run the loop
    self.assertEqual(10, var_a.eval())
    self.assertEqual(10, var_b.eval())
# b/24814668
def testWhileUpdateVariable_6(self):
  """Chained assign_adds inside the body; var_b accumulates 1+2+...+10."""
  with self.test_session():
    # Create some variables.
    var_a = variables.Variable(0, name="a")
    var_b = variables.Variable(0, name="b")
    c = constant_op.constant(0)
    variables.global_variables_initializer().run()

    # Loop condition
    def pred(i):
      return math_ops.less(i, 10)

    # Loop body
    def loop_body(i):
      asn1 = state_ops.assign_add(var_a, 1, name="a_add")
      # The second assign must observe the first one's effect.
      with ops.control_dependencies([asn1]):
        asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
      with ops.control_dependencies([asn2]):
        ni = math_ops.add(i, 1, name="i_add")
      return ni

    lpa = control_flow_ops.while_loop(
        pred, loop_body, [c], parallel_iterations=1, name="loop")
    self.assertEqual(0, var_b.eval())
    lpa.eval()  # Run the loop
    self.assertEqual(55, var_b.eval())
    self.assertEqual(10, var_a.eval())
def testWhileQueue_1(self):
  """Enqueues the loop counter into a FIFOQueue on each iteration."""
  with self.test_session():
    q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
    i = constant_op.constant(0)

    def c(i):
      return math_ops.less(i, 10)

    def b(i):
      ni = math_ops.add(i, 1)
      # Tie the enqueue to the counter so it executes every iteration.
      ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
      return ni

    r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
    self.assertEqual([10], r.eval())
    for i in xrange(10):
      self.assertEqual([i], q.dequeue().eval())
def testWhileStack_1(self):
  """Pushes in one while loop, pops in a second; sum of pops is 0+...+9."""
  with self.test_session():
    s = gen_data_flow_ops._stack_v2(-1, dtypes.int32, stack_name="foo")
    i = constant_op.constant(0)

    def c(i):
      return math_ops.less(i, 10)

    def b(i):
      ni = math_ops.add(i, 1)
      ni = control_flow_ops.with_dependencies(
          [gen_data_flow_ops._stack_push_v2(s, i)], ni)
      return ni

    r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
    x = constant_op.constant(0)

    def c1(i, _):
      return math_ops.greater(i, 0)

    def b1(i, x):
      ni = math_ops.subtract(i, 1)
      nx = x + gen_data_flow_ops._stack_pop_v2(s, dtypes.int32)
      return [ni, nx]

    _, rx = control_flow_ops.while_loop(
        c1,
        b1, [r, x], [r.get_shape(), tensor_shape.unknown_shape()],
        parallel_iterations=1)
    self.assertEqual(45, rx.eval())
def _testWhileGrad_ColocateGradients(self, colocate):
gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
) else "/device:GPU:0"
graph = ops.Graph()
with graph.as_default():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(
loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = graph.get_operations()
r_devices = [(op.name, op.device) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
# Only forward graph contain gpu in Square device
self.assertTrue(gpu_dev_name in dev)
elif colocate and "Square" in name:
# Forward and backward graphs contain gpu in Square/Square_grad devices
self.assertTrue(gpu_dev_name in dev)
else:
self.assertFalse(gpu_dev_name in dev)
with self.test_session(graph=graph) as sess:
self.assertAllClose(1024.0, sess.run(r))
def testWhileGrad_ColocateGradients(self):
  """Runs the colocation check with colocation off, then on."""
  for colocate in (False, True):
    self._testWhileGrad_ColocateGradients(colocate=colocate)
def testWhileGrad_Square(self):
  """Gradient of a squaring loop taken through an enclosing cond."""
  with self.test_session():
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = math_ops.square
    r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
    r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(1024.0, r.eval())
def testWhileGrad_Shape(self):
  """Gradient w.r.t. a placeholder of unknown length, using shape invariants."""
  with self.test_session():
    x = array_ops.placeholder(dtypes.float32, shape=[None])
    v = constant_op.constant([2.0], name="v")
    n = constant_op.constant(0, name="n")
    c = lambda i, v: math_ops.less(i, 5)
    b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
    r = control_flow_ops.while_loop(
        c,
        b, [n, v], [n.get_shape(), tensor_shape.unknown_shape()],
        parallel_iterations=1)
    r = gradients_impl.gradients(r[1], x)[0]
    self.assertEqual([None], r.get_shape().as_list())
    self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_BaseShape(self):
  """Gradient of a zero-iteration loop combined with another output."""
  with self.test_session() as sess:
    x = array_ops.placeholder(dtypes.float32, [None])
    v0 = constant_op.constant([2.0, 2.0], name="v")
    # The condition is always False, so the loop never runs.
    c = lambda v: constant_op.constant(False)
    b = lambda v: math_ops.multiply(v, x)
    r = control_flow_ops.while_loop(c, b, [v0])
    y = math_ops.square(x)
    r = gradients_impl.gradients([r, y], x)[0]
    self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
def testWhileGrad_MultipleUses(self):
  """Gradient when the loop output is used twice (r * r)."""
  with self.test_session():
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = math_ops.square
    r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
    r = math_ops.multiply(r, r)
    r = gradients_impl.gradients(r, v)[0]
    self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
  """Gradient when the loop output is added to itself (r + r)."""
  with self.test_session():
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = math_ops.square
    r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
    r = math_ops.add(r, r)
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
  """Gradients w.r.t. both a loop-invariant (a) and the loop variable (v)."""
  with self.test_session(use_gpu=use_gpu) as sess:
    a = constant_op.constant(3.0, name="a")
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = lambda v: math_ops.multiply(v, a)
    r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
    grad_a, grad_v = gradients_impl.gradients(r, [a, v])
    grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
    self.assertAllClose(216.0, grad_a_val)
    self.assertAllClose(81.0, grad_v_val)
def testWhileGrad_Mul(self):
  """Runs the multiply-gradient test across device and parallelism settings."""
  for use_gpu in (False, True):
    for p_iters in (1, 10):
      self._testWhileGrad_Mul(use_gpu=use_gpu, p_iters=p_iters)
def _testNestedWhileCondWhileGrad(self, use_gpu):
  """Gradient through while(cond(while)) nesting."""
  with self.test_session(use_gpu=use_gpu):
    v = constant_op.constant(1.0)

    def inner_loop(s):
      # Multiplies s by 2 four times.
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    c = lambda x: math_ops.less(x, 128.0)

    def b(x):
      return control_flow_ops.cond(
          constant_op.constant(True),
          lambda: math_ops.square(inner_loop(x)[1]),
          lambda: math_ops.multiply(x, 2.0))

    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(512.0, r.eval())
def testNestedWhileCondWhileGrad(self):
  """Runs the while(cond(while)) gradient test without and with GPU."""
  for use_gpu in (False, True):
    self._testNestedWhileCondWhileGrad(use_gpu=use_gpu)
def testWhileGrad_Variable(self):
  """Gradient of a while loop w.r.t. a Variable captured by the body."""
  with self.test_session():
    a = variables.Variable(3.0)
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = lambda v: math_ops.multiply(v, a)
    r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
    r = gradients_impl.gradients(r, a)
    variables.global_variables_initializer().run()
    self.assertAllClose(216.0, r[0].eval())
def testWhileGradInCond(self):
  """gradients() of a while loop computed inside a cond branch."""
  with self.test_session():
    n = ops.convert_to_tensor(1.0, name="n")
    x = array_ops.placeholder(dtypes.float32, shape=None)
    c = lambda n: math_ops.less(n, 10.0)
    b = lambda n: math_ops.add(n, x)

    def fn1():
      r = control_flow_ops.while_loop(c, b, [n],
                                      [tensor_shape.unknown_shape()])
      return gradients_impl.gradients(r, x)

    r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
    self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testWhileGradInWhile(self):
  """gradients() of a while loop computed inside another while loop's body."""
  with self.test_session():
    n = ops.convert_to_tensor(1.0, name="n")
    x = array_ops.placeholder(dtypes.float32, shape=None)
    c = lambda n: math_ops.less(n, 10.0)
    b = lambda n: math_ops.add(n, x)

    def b1(n):
      r = control_flow_ops.while_loop(c, b, [n],
                                      [tensor_shape.unknown_shape()])
      return gradients_impl.gradients(r, x)

    r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
                                    [tensor_shape.unknown_shape()])
    self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testWhile_NestedInput(self):
  """Loop variables given as a nested structure (namedtuple, tuple, tensor)."""
  with self.test_session() as sess:
    named = collections.namedtuple("named", ("a", "b"))
    loop_vars = [
        named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
        (constant_op.constant(2.0),
         constant_op.constant(3.0)), constant_op.constant(4.0)
    ]
    c = lambda lv0, _1, _2: lv0.a < 100.0

    def b(lv0, lv1, lv2):
      lv0 = named(a=lv0.a + 1, b=lv0.b)
      lv1 = (lv1[0] + 1, lv1[1])
      lv2 += 2
      return [lv0, lv1, lv2]

    r = control_flow_ops.while_loop(c, b, loop_vars)
    # The output structure must mirror the input structure.
    self.assertTrue(isinstance(r, list))
    self.assertTrue(isinstance(r[0], named))
    self.assertTrue(isinstance(r[1], tuple))
    self.assertTrue(isinstance(r[2], ops.Tensor))
    r_flattened = nest.flatten(r)
    self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
                     sess.run(r_flattened))
def testWhile_NestedBadArityFails(self):
  """Body returning fewer values than loop_vars raises ValueError."""
  with self.test_session():
    named = collections.namedtuple("named", ("a", "b"))
    loop_vars = [
        named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
        (constant_op.constant(2.0),
         constant_op.constant(3.0)), constant_op.constant(4.0)
    ]
    c = lambda lv0, _1, _2: lv0.a < 100.0

    def b(lv0, lv1, _):
      # Drops the third loop variable on purpose.
      return [lv0, lv1]

    with self.assertRaisesRegexp(ValueError, "the same number of elements"):
      control_flow_ops.while_loop(c, b, loop_vars)
def testWhileGrad_ys_xs(self):
  """Gradients for various (ys, xs) combinations of a two-variable loop."""
  with self.test_session():
    x = constant_op.constant(3.0, name="x")
    y = constant_op.constant(2.0, name="y")
    c = lambda x, y: math_ops.less(x, 100.0)

    def b(x, y):
      y1 = math_ops.add(x, y)
      x1 = math_ops.multiply(x, y1)
      return x1, y1

    rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
    r = gradients_impl.gradients([rx, ry], x)
    self.assertAllClose(304.0, r[0].eval())
    r = gradients_impl.gradients([rx, ry], y)
    self.assertAllClose(124.0, r[0].eval())
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(295.0, r[0].eval())
    r = gradients_impl.gradients([rx], y)
    self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
  """Gradient is the same whether or not the counter output is in ys."""
  with self.test_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(2.0, name="x")
    c = lambda i, x: math_ops.less(i, 10)

    def b(i, x):
      x = math_ops.multiply(x, 2.0)
      i = math_ops.add(i, 1)
      return i, x

    ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    r = gradients_impl.gradients([ri, rx], x)
    self.assertAllClose(1024.0, r[0].eval())
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_NoGradient(self):
  """back_prop=False: gradient flows only through the add, not the loop."""
  with self.test_session():
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = math_ops.square
    r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
    r = math_ops.add(r, v)
    r = gradients_impl.gradients(r, v)
    self.assertAllClose(1.0, r[0].eval())
def testWhileGrad_NoDependency(self):
  """Gradient for a loop variable that is passed through unchanged."""
  with self.test_session() as sess:
    variable = variables.Variable(array_ops.ones([2, 3]))
    duration = array_ops.zeros([], dtype=dtypes.int32)

    def cond(duration, tensor, _):
      del tensor
      return duration < 10

    def body(duration, tensor, _):
      # tensor is forwarded untouched; only the counter changes.
      return (duration + 1, tensor, tensor)

    loop_vars = [duration, variable, variable]
    tensors = control_flow_ops.while_loop(
        cond=cond, body=body, loop_vars=loop_vars)
    cost = math_ops.reduce_sum(tensors[2])
    grad = gradients_impl.gradients(cost, [variable])
    variables.global_variables_initializer().run()
    self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
def testWhileGrad_Const(self):
  """Gradient w.r.t. an input the body replaces with a constant is zero."""
  with self.test_session() as sess:
    c0 = constant_op.constant(0.0, name="c0")
    c1 = constant_op.constant(1.0, name="c1")
    duration = constant_op.constant(0, name="t")

    def cond(duration, _):
      return duration < 1

    def body(duration, _):
      # Discards the second loop variable and substitutes c1.
      return duration + 1, c1

    loop_vars = [duration, c0]
    tensors = control_flow_ops.while_loop(
        cond=cond, body=body, loop_vars=loop_vars)
    cost = math_ops.reduce_sum(tensors[1])
    grad = gradients_impl.gradients(cost, [c0])
    self.assertAllClose(0.0, sess.run(grad[0]))
def testWhileGrad_SerialTwoLoops(self):
  """Gradient through two while loops chained in series."""
  with self.test_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(2.0, name="x")
    c = lambda i, x: math_ops.less(i, 5)

    def b(i, x):
      x = math_ops.multiply(x, 2.0)
      i = math_ops.add(i, 1)
      return i, x

    _, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    # Feed the first loop's output into a second, identical loop.
    _, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
  """Gradient through two independent while loops whose outputs are summed."""
  with self.test_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(2.0, name="x")
    c = lambda i, x: math_ops.less(i, 5)

    def b(i, x):
      x = math_ops.multiply(x, 2.0)
      i = math_ops.add(i, 1)
      return i, x

    _, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    _, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    rx = math_ops.add(r1, r2)
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(64.0, r[0].eval())
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
  """Gradient of one loop output that control-depends on another output."""
  with self.test_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(1.0, name="y")
    c = lambda i, *_: math_ops.less(i, 1, name="cond_less")

    def b(i, xi, yi):
      # return (i + 1, xi, xi + yi)
      return (math_ops.add(i, 1, name="inc"), array_ops.identity(
          xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))

    _, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
    with ops.control_dependencies([x_f]):
      y_f_d = array_ops.identity(y_f, name="y_f_d")
    self.assertAllClose(2.0, y_f_d.eval())  # y_f_d = 1.0 + 1.0
    g = gradients_impl.gradients([y_f_d], [x])[0]
    self.assertTrue(g is not None)
    self.assertAllClose(1.0, g.eval())  # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
  """Gradient of a while loop whose body contains another while loop."""
  with self.test_session(use_gpu=use_gpu):
    v = constant_op.constant(1.0)

    def inner_loop(s):
      c = lambda x: math_ops.less(x, 4.0)
      b = lambda x: math_ops.multiply(x, 2.0)
      return control_flow_ops.while_loop(c, b, [s])

    c = lambda x: math_ops.less(x, 2.0)
    b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(8.0, r.eval())
def testNestedWhileGrad_Simple(self):
  """Runs the simple nested-while gradient test without and with GPU."""
  for use_gpu in (False, True):
    self._testNestedWhileGrad_Simple(use_gpu=use_gpu)
def testNestedWhileGrad_SerialInner(self):
  """Gradient with two inner while loops chained serially inside the body."""
  with self.test_session():
    v = constant_op.constant(1.0)

    def inner_loop1(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    def inner_loop2(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    c = lambda x: math_ops.less(x, 128.0)
    b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(256.0, r.eval())
def testNestedWhileGrad_ParallelInner(self):
  """Gradient with two inner while loops running in parallel in the body."""
  with self.test_session():
    v = constant_op.constant(1.0)

    def inner_loop1(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    def inner_loop2(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    c = lambda x: math_ops.less(x, 128.0)
    b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(512.0, r.eval())
def testNestedWhileGrad_ParallelIterations(self):
  """Make sure the stack pushes and pops of an inner loop are executed in
  the sequential order of the iterations of its outer loop.
  """
  with self.test_session() as sess:

    def inner_loop(t):
      fn = lambda n: n + math_ops.square(var)
      return functional_ops.map_fn(fn=fn, elems=t, parallel_iterations=10)

    def outer_loop(inp):
      return functional_ops.map_fn(
          fn=inner_loop, elems=inp, parallel_iterations=10)

    var = variables.Variable(constant_op.constant(3.0))
    inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    res = outer_loop(inp)
    optimizer = adam.AdamOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
    sess.run(variables.global_variables_initializer())
    sess.run(train_op)
    self.assertAllClose(2.999, var.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
  """Gradient through a cond inside a while loop body."""
  with self.test_session(use_gpu=use_gpu):
    v = ops.convert_to_tensor(2.0, name="v")
    n = ops.convert_to_tensor(100.0, name="n")
    one = ops.convert_to_tensor(1.0, name="one")
    c = lambda x: math_ops.less(x, n)
    # pylint: disable=undefined-variable
    # for OSS build
    b = lambda x: control_flow_ops.cond(constant_op.constant(True),
                                        lambda: math_ops.square(x),
                                        lambda: math_ops.subtract(x, one))
    # pylint: enable=undefined-variable
    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(1024.0, r.eval())
def testWhileCondGrad_Simple(self):
  """Runs the while-cond gradient test without and with GPU."""
  for use_gpu in (False, True):
    self._testWhileCondGrad_Simple(use_gpu=use_gpu)
def testWhileCondGrad_UnknownShape(self):
  """Same as _testWhileCondGrad_Simple but the input has unknown shape."""
  with self.test_session() as sess:
    v = array_ops.placeholder(dtypes.float32)
    n = ops.convert_to_tensor(100.0, name="n")
    one = ops.convert_to_tensor(1.0, name="one")
    c = lambda x: math_ops.less(x, n)
    # pylint: disable=undefined-variable
    # for OSS build
    b = lambda x: control_flow_ops.cond(constant_op.constant(True),
                                        lambda: math_ops.square(x),
                                        lambda: math_ops.subtract(x, one))
    # pylint: enable=undefined-variable
    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    r = sess.run(r, feed_dict={v: 2.0})
    self.assertAllClose(1024.0, r)
def testWhileGrad_Concat(self):
  """Gradient through a loop that grows a tensor via concat each iteration."""
  with self.test_session() as sess:
    x = variable_scope.get_variable("x", initializer=[[1., 2.]])
    i0 = constant_op.constant(0)
    h0 = array_ops.zeros([0, 2])

    def condition(i, _):
      return i < 2

    def body(i, h):
      return i + 1, array_ops.concat([h, x], 0)

    # Shape invariant allows h's first dimension to grow across iterations.
    _, h = control_flow_ops.while_loop(
        condition, body, [i0, h0],
        [i0.get_shape(), tensor_shape.TensorShape([None, 2])])
    s = math_ops.reduce_sum(h)
    sess.run(variables.global_variables_initializer())
    optimizer = gradient_descent.GradientDescentOptimizer(0.01)
    op = optimizer.minimize(s)
    sess.run(op)
    self.assertAllClose([[0.98000002, 1.98000002]], sess.run(x))
def testWhileWithRefsWithGradients_1(self):
  """Ref-typed loop variables keep their ref dtype through loop and grads."""
  with self.test_session() as sess:
    x = variables.Variable(0)._ref()  # pylint: disable=protected-access
    i = constant_op.constant(0)
    c = lambda i, x: math_ops.less(i, 10)
    self.assertEqual(x.dtype, dtypes.int32_ref)

    # pylint: disable=protected-access
    def body(i, x):
      self.assertEqual(x.dtype, dtypes.int32_ref)
      return [i + 1, gen_array_ops._ref_identity(x)]

    # pylint: enable=protected-access
    r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
    grad_ys = [variables.Variable(73)._ref()]  # pylint: disable=protected-access
    grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
    variables.global_variables_initializer().run()
    self.assertEqual(r[0].dtype, dtypes.int32)
    self.assertEqual(r[1].dtype, dtypes.int32_ref)
    value_i, value_x, value_x_grad = sess.run(r + grad)
    self.assertEqual(10, value_i)
    self.assertEqual(0, value_x)
    self.assertEqual(73, value_x_grad)
def testWhileGrad_IndexedSlices(self):
  """Gradient of a loop whose variable is an IndexedSlices."""
  with self.test_session():
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([0, 3], name="indices")
    shape = constant_op.constant([10], name="dense_shape")
    i = constant_op.constant(0)
    x = ops.IndexedSlices(values, indices, dense_shape=shape)

    def c(i, _):
      return i < 10

    def b(i, x):
      return [
          i + 1, ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
      ]

    _, r = control_flow_ops.while_loop(c, b, [i, x])
    r = gradients_impl.gradients(r.values, values)[0]
    self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testWhileGrad_SparseTensor(self):
  """Gradient of a loop whose variable is a SparseTensor."""
  with self.test_session():
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant(
        [[0], [3]], dtype=dtypes.int64, name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)

    def c(i, _):
      return i < 10

    def b(i, x):
      return [
          i + 1, sparse_tensor.SparseTensor(x.indices, x.values * 2.0,
                                            x.dense_shape)
      ]

    _, r = control_flow_ops.while_loop(c, b, [i, x])
    r = gradients_impl.gradients(r.values, values)[0]
    self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testCallGradInLoop(self):
  """gradients() called inside a while loop body on outer-graph tensors."""
  with self.test_session() as sess:
    i0 = constant_op.constant(0)
    params = constant_op.constant(5.0)
    params_1 = math_ops.square(params)

    def c(i, _):
      return i < 10

    def b(i, x):
      data = constant_op.constant([1.0, 2.0, 3.0])
      data = math_ops.multiply(data, params_1)
      # Accumulate the gradient of data w.r.t. params on each iteration.
      x1 = x + gradients_impl.gradients(data, params)[0]
      return i + 1, x1

    output_grad = control_flow_ops.while_loop(c, b,
                                              [i0, constant_op.constant(0.0)])
    self.assertAllClose(600.0, sess.run(output_grad)[1])
def testWhileAndTensorArray(self):
  """Gradient through map_fn (TensorArray-based) used inside a while loop."""
  with self.test_session() as sess:
    param = constant_op.constant(2.0)
    n0 = constant_op.constant(0)
    y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")

    def c(i, _):
      return i < 10

    def b(i, y):
      return [
          i + 1,
          functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y)
      ]

    r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
    r = gradients_impl.gradients(r, param)[0]
    self.assertAllClose(107520.0, sess.run(r))
def testWhileGrad_StopGrad(self):
  """stop_gradient applied to loop outputs yields None gradients."""
  with self.test_session():
    x = constant_op.constant(3.0, name="x")
    y = constant_op.constant(2.0, name="y")
    c = lambda x, y: math_ops.less(x, 100.0)

    def b(x, y):
      y1 = math_ops.square(y)
      x1 = math_ops.add(math_ops.square(x), y1)
      return x1, y1

    rx, ry = control_flow_ops.while_loop(c, b, [x, y])
    # Baseline gradients without stop_gradient.
    r = gradients_impl.gradients(rx, y)[0]
    self.assertEqual(136.0, r.eval())
    r = gradients_impl.gradients(ry, y)[0]
    self.assertEqual(32.0, r.eval())
    # stop_gradient on an output (or anything downstream of it) blocks all
    # gradient flow, producing None.
    r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
    self.assertEqual(r, None)
    r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
    self.assertEqual(r, None)
    r = gradients_impl.gradients(
        array_ops.stop_gradient(math_ops.square(rx)), y)[0]
    self.assertEqual(r, None)
    r = gradients_impl.gradients(
        array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
    self.assertEqual(r, None)
    r = gradients_impl.gradients(
        array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
    self.assertEqual(r, None)
    # Partial stop_gradient: only the unblocked term contributes.
    r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
    self.assertEqual(168.0, r.eval())
    r = gradients_impl.gradients(
        math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
    self.assertEqual(136.0, r.eval())
    r = gradients_impl.gradients(
        math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
    self.assertEqual(32.0, r.eval())
def testWhileGrad_StopGradInside(self):
  """stop_gradient inside the loop body blocks only that dependence."""
  with self.test_session():
    x = constant_op.constant(3.0, name="x")
    y = constant_op.constant(2.0, name="y")

    c = lambda x, y: math_ops.less(x, 100.0)

    def b(x, y):
      y1 = array_ops.stop_gradient(math_ops.square(y))
      x1 = math_ops.add(math_ops.square(x), y1)
      return x1, y1

    rx, _ = control_flow_ops.while_loop(c, b, [x, y])

    # y feeds rx only through the stopped square, so d(rx)/dy == 0,
    # while d(rx)/dx is unaffected.
    r = gradients_impl.gradients(rx, y)[0]
    self.assertAllClose(0.0, r.eval())
    r = gradients_impl.gradients(rx, x)[0]
    self.assertAllClose(156.0, r.eval())
def testWhileGrad_StopGradInsideNoShape(self):
  """Same as testWhileGrad_StopGradInside but with unknown-shape inputs."""
  with self.test_session() as sess:
    x = array_ops.placeholder(dtypes.float32)
    y = array_ops.placeholder(dtypes.float32)

    c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)

    def b(x, y):
      y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
      x1 = math_ops.add(math_ops.square(x), y1)
      return x1, y1

    rx, _ = control_flow_ops.while_loop(c, b, [x, y])

    r = gradients_impl.gradients(rx, y)[0]
    feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
    self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
    r = gradients_impl.gradients(rx, x)[0]
    self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))

    # No gradient op should have been created for the stopped tensor at all.
    name = "gradients/while/stopped_grad"
    all_ops = x.graph.get_operations()
    self.assertFalse(any([name in op.name for op in all_ops]))
def testWhileGradGradFail(self):
  """Second-order gradients through scan are unsupported and must raise."""
  theta = variables.Variable(initial_value=1.)

  def fn(prev, x):
    return prev + x * theta

  result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
  grad_theta = gradients_impl.gradients(result, theta)
  with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
    gradients_impl.gradients(grad_theta, theta)
  # Wrapping the first-order gradient in stop_gradient sidesteps the error.
  grad_theta_stopped = array_ops.stop_gradient(grad_theta)
  gradients_impl.gradients(grad_theta_stopped, theta)
def testStopGradOnWhileGrad(self):
  """A stopped while-loop gradient contributes nothing to further grads."""
  with self.test_session():
    x = constant_op.constant(2.0, name="x")
    y = constant_op.constant(2.0, name="y")

    c = lambda x: math_ops.less(x, 100.0)
    b = lambda x: math_ops.multiply(x, y)
    rx = control_flow_ops.while_loop(c, b, [x])

    # rg is the loop gradient d(rx)/dy, frozen by stop_gradient below.
    rg = gradients_impl.gradients(rx, y)[0]
    rg = array_ops.stop_gradient(rg)
    r = math_ops.add(math_ops.square(y), rx)
    r = math_ops.add(r, rg)
    r = gradients_impl.gradients(r, y)[0]
    self.assertEqual(388.0, r.eval())
def testStopGradMultiFlows(self):
  """stop_gradient on one of several gradient flows through a variable."""
  with self.test_session():

    def body(i, y, r):
      # The variable is created (once) inside the loop body.
      x = variable_scope.get_variable(
          "x", shape=(), dtype=dtypes.float32,
          initializer=init_ops.ones_initializer())
      y *= x
      return [i + 1, y, r + math_ops.reduce_sum(y)]

    i0 = constant_op.constant(0)
    y0 = array_ops.ones(5)
    r0 = constant_op.constant(0.0)
    cond = lambda i, y, r: i < 1
    _, _, r = control_flow_ops.while_loop(
        cond, body, [i0, y0, r0], back_prop=True)

    vars_ = variables.global_variables()
    grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
    # The flow through `grads` is stopped, so only the direct dependence of
    # r on x contributes to the final gradient.
    z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
    result = gradients_impl.gradients(z, vars_)[0]
    variables.global_variables_initializer().run()
    self.assertEqual(5.0, result.eval())
def testOneValueCond(self):
  """cond returning a single tensor from each branch yields a Tensor."""
  with self.test_session():
    c = array_ops.placeholder(dtypes.int32, shape=[])
    one = ops.convert_to_tensor(1, name="one")
    two = ops.convert_to_tensor(2, name="two")
    p = math_ops.greater_equal(c, 1)
    i = control_flow_ops.cond(p, lambda: one, lambda: two)
    self.assertTrue(isinstance(i, ops.Tensor))

    # True case: c = 2 is >= 1
    self.assertEqual([1], i.eval(feed_dict={c: 2}))

    # False case: c = 0 is not >= 1
    self.assertEqual([2], i.eval(feed_dict={c: 0}))
def testExampleCond(self):
  """cond choosing between the L2 and L1 norm of the same tensor."""
  with self.test_session():
    x = ops.convert_to_tensor([-2.0, 2.0], name="x")
    d = array_ops.placeholder(dtypes.int32, shape=[])

    def l2():
      return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))

    def l1():
      return math_ops.reduce_sum(math_ops.abs(x))

    i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
    # d == 1 takes the L1 branch (|−2| + |2| = 4); d == 2 the L2 branch.
    self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
    self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testCase(self):
  """case(): ordering, duplicates, exclusive mode, and the default branch."""
  with self.test_session():
    x = constant_op.constant(1)
    y = constant_op.constant(2)
    z = constant_op.constant(3)
    f1 = lambda: constant_op.constant(17)
    f2 = lambda: constant_op.constant(23)
    f3 = lambda: constant_op.constant(-1)

    r1 = control_flow_ops.case(
        {
            x < y: f1,
            x > z: f2
        }, default=f3, exclusive=True)
    self.assertAllEqual(r1.eval(), 17)

    r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
    self.assertAllEqual(r2.eval(), 23)

    # Duplicate events can happen, first one is selected
    r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
    self.assertAllEqual(r3.eval(), 17)

    # Duplicate events cause an error if exclusive = True
    r4 = control_flow_ops.case(
        [(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
    with self.assertRaisesOpError(
        "More than one condition evaluated as True but exclusive=True."):
      r4.eval()

    # Check that the default is called if none of the others are
    r5 = control_flow_ops.case({x > y: f1}, default=f3)
    self.assertAllEqual(r5.eval(), -1)

    ran_once = [False, False, False]

    def break_run_twice(ix):
      # Returns a branch fn that records (in ran_once) that it was traced.

      def _break():
        ran_once[ix] = True
        return constant_op.constant(ix)

      return _break

    # Should not fail - each conditional gets called exactly once
    # except default. Default gets called twice: once to create an
    # empty output and once for the actual cond switch.
    r6 = control_flow_ops.case(
        [(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
        default=lambda: constant_op.constant(2))

    self.assertAllEqual(r6.eval(), 0)
def testCaseSideEffects(self):
  """Only the branch that case() selects runs its side-effecting assign."""
  with self.test_session() as sess:
    v0 = variables.Variable(-1)
    v1 = variables.Variable(-1)
    v2 = variables.Variable(-1)

    # Each branch assigns a distinct variable, so the post-eval variable
    # state reveals exactly which branch executed.
    a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)
    b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)
    c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)

    x = constant_op.constant(1)
    y = constant_op.constant(2)

    r0 = control_flow_ops.case(
        ((x < y, a), (x > y, b)), default=c, exclusive=True)
    r1 = control_flow_ops.case(
        ((x > y, a), (x < y, b)), default=c, exclusive=True)
    r2 = control_flow_ops.case(
        ((x > y, a), (x > y, b)), default=c, exclusive=True)

    # r2: no predicate true -> default branch assigns v2 only.
    variables.global_variables_initializer().run()
    self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
    self.assertEqual(2, r2.eval())
    self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])

    # r1: second predicate true -> b assigns v1 only.
    variables.global_variables_initializer().run()
    self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
    self.assertEqual(1, r1.eval())
    self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])

    # r0: first predicate true -> a assigns v0 only.
    variables.global_variables_initializer().run()
    self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
    self.assertEqual(0, r0.eval())
    self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
def testOneOpCond(self):
  """cond whose branches are single assign ops."""
  with self.test_session():
    v = variables.Variable(0)
    c = ops.convert_to_tensor(0)
    one = ops.convert_to_tensor(1)
    two = ops.convert_to_tensor(2)
    p = math_ops.greater_equal(c, 1)

    def a():
      return state_ops.assign(v, one)

    def b():
      return state_ops.assign(v, two)

    i = control_flow_ops.cond(p, a, b)
    self.assertTrue(isinstance(i, ops.Tensor))
    variables.global_variables_initializer().run()

    self.assertEqual(0, v.eval())

    # True case: c = 2 is >= 1, v is set to 1.
    self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
    self.assertEqual(1, v.eval())

    # False case: c = 0 is not >= 1, v is set to 2.
    self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
    self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
  """with_dependencies runs the variable initializer before yielding v."""
  with self.test_session() as sess:
    v = variables.Variable(0.0)
    c = constant_op.constant(10)

    # Fetching v directly will result in an uninitialized error
    with self.assertRaisesOpError("Attempting to use uninitialized value"):
      sess.run([c, v])

    # Use a control dependency to ensure init_variable is run
    # while asking for c
    real_v = control_flow_ops.with_dependencies(
        name="real_tensor",
        output_tensor=v._ref(),  # pylint: disable=protected-access
        dependencies=[v.initializer])

    c_val, real_v_val = sess.run([c, real_v])

    # Ensure the result of 'real_c' is the same as 'c'
    self.assertAllEqual(10, c_val)

    # Ensure that 'v' is initialized
    self.assertAllClose(0.0, real_v_val)
def testWithTensorDependencies(self):
  """Chained with_dependencies: fetching the tail initializes the variable."""
  with self.test_session():
    v = variables.Variable(0.0)
    c1 = constant_op.constant(10)
    c2 = constant_op.constant(20)

    # c1_with_init_v depends on the init op for v
    c1_with_init_v = control_flow_ops.with_dependencies(
        name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
    # c2_with_c1 depends on the value of c1_with_init_v
    c2_with_c1_dep = control_flow_ops.with_dependencies(
        name="c2_with_c1_dep",
        output_tensor=c2,
        dependencies=[c1_with_init_v])

    # Fetching v directly will result in an uninitialized error
    with self.assertRaisesOpError("Attempting to use uninitialized value"):
      v.eval()

    # Get the value of 'c2_with_c1_dep', which should cause 'v'
    # to be initialized.
    self.assertAllEqual(20, c2_with_c1_dep.eval())

    # Ensure that 'v' is initialized
    self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
  """with_dependencies also accepts and returns IndexedSlices values."""
  with self.test_session():
    v = variables.Variable(
        np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
    v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
    gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
    v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
                                                           v_at_1)
    gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
                                                v_at_1_after_init.indices)

    # Fetching gather_v_at_1 will result in an uninitialized error
    with self.assertRaisesOpError("Attempting to use uninitialized value"):
      gather_v_at_1.eval()

    # Getting gather_v_at_1_after_init will work, and initialize v.
    self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())

    # Double check that 'v' is initialized
    self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
  """Device placement and colocation of ops created by with_dependencies."""
  with ops.Graph().as_default():
    # device set on tensor => same device on dep.
    with ops.device("/job:ps"):
      vd = variables.Variable([0.0])
    with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
    self.assertTrue("/job:ps" in with_vd_dep.device)

    # No device set on tensor => no device on dep.
    vnod = variables.Variable([0.0])
    with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
                                                       vnod)
    self.assertDeviceEqual(None, with_vnod_dep.device)

    # device set on tensor, default device on graph => default device on dep.
    vdef = variables.Variable([0.0], name="vdef")
    with ops.device("/job:worker/device:GPU:1"):
      with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
                                                         vdef)
      # The device is empty, but the colocation constraint is set.
      self.assertDeviceEqual("", with_vdef_dep.device)
      self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
def testGroup(self):
  """group() runs all of its inputs when the grouped op is run."""
  with self.test_session() as sess:
    v1 = variables.Variable([0.0])
    v2 = variables.Variable([1.0])

    # Group init1 and init2 and run.
    init = control_flow_ops.group(v1.initializer, v2.initializer)

    # Fetching v1 directly will result in an uninitialized error
    with self.assertRaisesOpError("Attempting to use uninitialized value"):
      v1.eval()

    # Runs "init" before fetching v1 and v2.
    init.run()
    v1_val, v2_val = sess.run([v1, v2])

    # Ensure that v1 and v2 are initialized
    self.assertAllClose([0.0], v1_val)
    self.assertAllClose([1.0], v2_val)
def testGroupEmpty(self):
  """group() with no inputs still returns a NoOp with no control inputs."""
  op = control_flow_ops.group()
  self.assertEqual(op.type, "NoOp")
  self.assertEqual(op.control_inputs, [])
def testMergeShapes(self):
  """Static shape inference of merge() over inputs with various shapes."""
  # All inputs unknown.
  p1 = array_ops.placeholder(dtypes.float32)
  p2 = array_ops.placeholder(dtypes.float32)
  p3 = array_ops.placeholder(dtypes.float32)
  m, index = control_flow_ops.merge([p1, p2, p3])
  self.assertIs(None, m.get_shape().ndims)
  self.assertEqual([], index.get_shape())

  # All inputs known with different ranks.
  p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
  p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
  m, index = control_flow_ops.merge([p1, p2])
  self.assertIs(None, m.get_shape().ndims)
  self.assertEqual([], index.get_shape())

  # All inputs known with some dimensions different.
  p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
  p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
  m, index = control_flow_ops.merge([p1, p2])
  self.assertEqual([None, None], m.get_shape().as_list())
  self.assertEqual([], index.get_shape())

  p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
  p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
  m, index = control_flow_ops.merge([p1, p2])
  self.assertEqual([None, 2], m.get_shape().as_list())
  self.assertEqual([], index.get_shape())

  p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
  p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
  m, index = control_flow_ops.merge([p1, p2])
  self.assertEqual([None, 2], m.get_shape().as_list())
  self.assertEqual([], index.get_shape())

  # All inputs known with same dimensions.
  p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
  p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
  m, index = control_flow_ops.merge([p1, p2])
  self.assertEqual([1, 2], m.get_shape().as_list())
  self.assertEqual([], index.get_shape())

  p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
  p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
  m, index = control_flow_ops.merge([p1, p2])
  self.assertEqual([None, 2], m.get_shape().as_list())
  self.assertEqual([], index.get_shape())

  p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
  p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
  m, index = control_flow_ops.merge([p1, p2])
  self.assertEqual([None, None], m.get_shape().as_list())
  self.assertEqual([], index.get_shape())
def testRefSelect(self):
  """Static shape inference of ref_select() over ref variables."""
  index = array_ops.placeholder(dtypes.int32)

  # All inputs unknown.
  p1 = array_ops.placeholder(dtypes.float32)
  p2 = array_ops.placeholder(dtypes.float32)
  p3 = array_ops.placeholder(dtypes.float32)
  v1 = variables.Variable(p1, validate_shape=False)
  v2 = variables.Variable(p2, validate_shape=False)
  v3 = variables.Variable(p3, validate_shape=False)
  self.assertIs(None, v1.get_shape().ndims)
  s = control_flow_ops.ref_select(index, [v1, v2, v3])
  self.assertIs(None, s.get_shape().ndims)

  # All inputs known but different.
  v1 = variables.Variable([[1, 2]])
  v2 = variables.Variable([[2], [1]])
  s = control_flow_ops.ref_select(index, [v1, v2])
  self.assertIs(None, s.get_shape().ndims)

  # All inputs known and same.
  v1 = variables.Variable([[1, 2]])
  v2 = variables.Variable([[1, 2]])
  s = control_flow_ops.ref_select(index, [v1, v2])
  self.assertEqual([1, 2], s.get_shape())

  # Possibly the same but not guaranteed.
  v1 = variables.Variable([[1., 2.]])
  p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
  v2 = variables.Variable(p2, validate_shape=False)
  s = control_flow_ops.ref_select(index, [v1, v2])
  self.assertEqual(None, s.get_shape())
def testRunLoopTensor(self):
  """Tensors created inside a loop body must not be runnable outside it."""
  with self.test_session() as sess:
    tensor_list = []

    def condition(t):
      return t < constant_op.constant(5)

    def body(_):
      # Leak a tensor out of the loop body via the enclosing list.
      tensor_list.append(constant_op.constant(5))
      return constant_op.constant(10)

    result = control_flow_ops.while_loop(condition, body,
                                         [constant_op.constant(4)])
    self.assertEqual(10, sess.run(result))

    # Ensure that we cannot run a tensor that escapes the loop body
    # accidentally.
    with self.assertRaises(ValueError):
      sess.run(tensor_list[0])
def testWhilePyFuncBasic(self):
  """while_loop whose body calls back into Python via py_func."""

  def func(x):
    return np.square(x)

  with self.test_session():
    r = control_flow_ops.while_loop(
        lambda i, v: i < 4,
        lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
        [constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
        [tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
    # 2 squared four times: 2 -> 4 -> 16 -> 256 -> 65536.
    self.assertEqual(r[1].eval(), 65536.0)
def testWhileFuncBasic(self):
  """while_loop whose body calls a Defun, including its gradient."""

  @function.Defun(dtypes.float32)
  def func(x):
    return math_ops.square(math_ops.square(x))

  with self.test_session():
    x = constant_op.constant(2.0, dtypes.float32)
    r = control_flow_ops.while_loop(
        lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
        [constant_op.constant(0), x],
        [tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
    # Two iterations of x^4: 2 -> 16 -> 65536.
    self.assertEqual(r[1].eval(), 65536.0)
    r = gradients_impl.gradients(r, x)[0]
    self.assertEqual(r.eval(), 524288.0)
    # Exactly one stack op should have been created for backprop state.
    self.assertEqual(
        len([op for op in x.graph.get_operations() if op.type == "StackV2"]),
        1)
@test_util.with_c_api
class ControlFlowContextCheckTest(test.TestCase):
  """Checks for illegal uses of tensors across control-flow contexts.

  NOTE(review): the expected error messages below embed auto-generated op
  names (e.g. 'while/Const_1'), which depend on graph construction order.
  """

  def _getWhileTensor(self):
    """Creates and returns a tensor from a while context."""
    tensor = []

    def body(i):
      if not tensor:
        tensor.append(constant_op.constant(1))
      return i + tensor[0]

    control_flow_ops.while_loop(lambda i: i < 10, body, [0])
    return tensor[0]

  def _getCondTensor(self):
    """Creates and returns a tensor from a cond context."""
    cond_tensor = []

    def true_fn():
      if not cond_tensor:
        cond_tensor.append(constant_op.constant(1))
      return cond_tensor[0]

    control_flow_ops.cond(math_ops.less(1, 2), true_fn,
                          lambda: constant_op.constant(0))
    return cond_tensor[0]

  def testInvalidContext(self):
    # Accessing a while loop tensor outside of control flow is illegal.
    while_tensor = self._getWhileTensor()
    with self.assertRaisesRegexp(
        ValueError,
        "Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
        "is in a while loop. See info log for more details."):
      math_ops.add(1, while_tensor)

  def testInvalidContextInCond(self):
    # Accessing a while loop tensor in cond is illegal.
    while_tensor = self._getWhileTensor()
    with self.assertRaisesRegexp(
        ValueError,
        "Cannot use 'while/Const_1' as input to 'cond/Add' because "
        "'while/Const_1' is in a while loop. See info log for more details."):
      # TODO(skyewm): this passes if we return while_tensor directly instead
      # of using it as input to another op.
      control_flow_ops.cond(math_ops.less(1, 2),
                            lambda: math_ops.add(1, while_tensor),
                            lambda: constant_op.constant(0))

  def testInvalidContextInWhile(self):
    # Accessing a while loop tensor in a different while loop is illegal.
    while_tensor = self._getWhileTensor()
    with self.assertRaisesRegexp(
        ValueError,
        "Cannot use 'while_1/Add' as input to 'while/Const_1' because they are "
        "in different while loops. See info log for more details."):
      control_flow_ops.while_loop(lambda i: i < 10,
                                  lambda x: math_ops.add(1, while_tensor), [0])

    with self.assertRaisesRegexp(
        ValueError,
        "Cannot use 'while_2/NextIteration' as input to 'while/Const_1' "
        "because they are in different while loops. See info log for more "
        "details."):
      control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])

  def testValidCondContext(self):
    # Accessing a tensor from a cond context is OK (although dangerous).
    cond_tensor = self._getCondTensor()
    math_ops.add(1, cond_tensor)

  def testValidCondContextBranches(self):
    # Accessing a tensor from a cond context from the other branch's cond
    # context is OK (although dangerous).
    cond_tensor = []

    def branch_fn():
      if not cond_tensor:
        cond_tensor.append(constant_op.constant(1))
      return cond_tensor[0]

    control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)

  def testValidWhileContext(self):
    # Accessing a tensor in a nested while is OK.
    def body(_):
      c = constant_op.constant(1)
      return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])

    control_flow_ops.while_loop(lambda i: i < 5, body, [0])

  def testValidNestedContexts(self):
    # Accessing a tensor from a cond context in a while context, all inside an
    # outer while context, is OK.
    def body(_):
      cond_tensor = self._getCondTensor()
      # Create another cond containing the while loop for good measure
      return control_flow_ops.cond(
          math_ops.less(1, 2),
          lambda: control_flow_ops.while_loop(lambda i: i < 3,
                                              lambda i: i + cond_tensor, [0]),
          lambda: constant_op.constant(0))

    control_flow_ops.while_loop(lambda i: i < 5, body, [0])

  def testInvalidNestedContexts(self):
    # Accessing a tensor from a while context in a different while context, all
    # inside a cond context, is illegal.
    def true_fn():
      while_tensor = self._getWhileTensor()
      return control_flow_ops.while_loop(lambda i: i < 3,
                                         lambda i: i + while_tensor, [0])

    with self.assertRaisesRegexp(
        ValueError,
        "Cannot use 'cond/while_1/add' as input to 'cond/while/Const_1' because"
        " they are in different while loops. See info log for more details."):
      control_flow_ops.cond(math_ops.less(1, 2), true_fn,
                            lambda: constant_op.constant(0))
@test_util.with_c_api
class TupleTest(test.TestCase):
  """Tests for control_flow_ops.tuple."""

  def testTensors(self):
    """Fetching any output of tuple() triggers all inputs' dependencies."""
    for v1_first in [True, False]:
      with self.test_session():
        v1 = variables.Variable([1.0])
        add1 = math_ops.add(
            control_flow_ops.with_dependencies([v1.initializer], v1._ref()),  # pylint: disable=protected-access
            2.0)
        v2 = variables.Variable([10.0])
        add2 = math_ops.add(
            control_flow_ops.with_dependencies([v2.initializer], v2._ref()),  # pylint: disable=protected-access
            20.0)
        t1, _, t2 = control_flow_ops.tuple([add1, None, add2])

        # v1 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          v1.eval()

        # v2 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          v2.eval()

        if v1_first:
          # Getting t1 initializes v2.
          self.assertAllClose([3.0], t1.eval())
          self.assertAllClose([10.0], v2.eval())
        else:
          # Getting t2 initializes v1.
          self.assertAllClose([30.0], t2.eval())
          self.assertAllClose([1.0], v1.eval())

  def testIndexedSlices(self):
    """Same as testTensors, but with IndexedSlices inputs."""
    for v1_first in [True, False]:
      with self.test_session():
        v1 = variables.Variable(
            np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
                np.float32))
        v1_at_1 = ops.IndexedSlices(
            control_flow_ops.with_dependencies([v1.initializer], v1._ref()),  # pylint: disable=protected-access
            constant_op.constant([1]))

        v2 = variables.Variable(
            np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
                np.float32))
        v2_at_1 = ops.IndexedSlices(
            control_flow_ops.with_dependencies([v2.initializer], v2._ref()),  # pylint: disable=protected-access
            constant_op.constant([1]))

        st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
        g1 = array_ops.gather(st1.values, st1.indices)
        g2 = array_ops.gather(st2.values, st2.indices)

        # v1 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          v1.eval()

        # v2 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          v2.eval()

        if v1_first:
          # Getting g1 initializes v2.
          self.assertAllClose([[10.0, 11.0]], g1.eval())
          self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
                              v2.eval())
        else:
          # Getting g2 initializes v1.
          self.assertAllClose([[10.1, 11.1]], g2.eval())
          self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
                              v1.eval())

  def testAcceptTensorsAsControlInputs(self):
    """Plain tensors may be passed via control_inputs and still run."""
    with self.test_session():
      var = variables.Variable(0)
      assign = state_ops.assign(var, 1)
      t, = control_flow_ops.tuple(
          [constant_op.constant(0)], control_inputs=[assign])

      # Should trigger the assign.
      t.eval()

      # Fixed: use assertEqual instead of the deprecated assertEquals alias
      # (removed from unittest in Python 3.12).
      self.assertEqual(1, var.eval())
@test_util.with_c_api
class AssertTest(test.TestCase):
  """Tests for control_flow_ops.Assert."""

  def testGuardedAssertDoesNotCopyWhenTrue(self):
    """A guarded Assert must not copy its data off the GPU when it passes."""
    with self.test_session(use_gpu=True) as sess:
      with ops.device(test.gpu_device_name()):
        value = constant_op.constant(1.0)
      with ops.device("/cpu:0"):
        true = constant_op.constant(True)
        guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
        # Raw assert op, without the guard added by control_flow_ops.Assert.
        unguarded_assert = gen_logging_ops._assert(
            true, [value], name="unguarded")
      opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
      guarded_metadata = config_pb2.RunMetadata()
      sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
      unguarded_metadata = config_pb2.RunMetadata()
      sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
      # Collect node names from the step traces of both runs.
      guarded_nodestat_names = [
          n.node_name
          for d in guarded_metadata.step_stats.dev_stats for n in d.node_stats
      ]
      unguarded_nodestat_names = [
          n.node_name
          for d in unguarded_metadata.step_stats.dev_stats for n in d.node_stats
      ]
      guarded_memcpy_nodestat_names = [
          n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
      ]
      unguarded_memcpy_nodestat_names = [
          n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
      ]
      if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
        # A copy was performed for the unguarded assert
        self.assertLess(0, len(unguarded_memcpy_nodestat_names))
        # No copy was performed for the guarded assert
        self.assertEqual([], guarded_memcpy_nodestat_names)
@test_util.with_c_api
class WhileOpBenchmark(test.Benchmark):
  """Evaluate the performance of while_loop op."""

  def _getInitVariables(self):
    """Returns (init_step, image, kernel) for the conv2d benchmark loop."""
    batch_size = 10
    image_size = 256
    kernel_size = 3
    depth = 16

    init_step = constant_op.constant(-1)
    image = variable_scope.get_variable(
        "image",
        initializer=random_ops.random_normal(
            [batch_size, image_size, image_size, depth],
            dtype=dtypes.float32,
            stddev=1e-1))
    kernel = variable_scope.get_variable(
        "weights",
        initializer=random_ops.truncated_normal(
            [kernel_size, kernel_size, depth, depth],
            dtype=dtypes.float32,
            stddev=1e-1))
    return init_step, image, kernel

  def _runOneBenchmark(self,
                       default_device,
                       num_iters=10,
                       static_unroll=False,
                       steps=10):
    """Evaluate the while loop performance.

    Args:
      default_device: The default device to run all ops except the loop_body.
        loop_body is always run on GPU.
      num_iters: Number of iterations to run.
      static_unroll: If true, run unrolled version; otherwise, run while_loop.
      steps: Total number of repeated steps to run the loop.

    Returns:
      The duration of the run in seconds.
    """

    def loop_body(i, x):
      with ops.device("/gpu:0"):
        # Always put loop body on GPU.
        # NOTE: closes over `kernel`, which is assigned below before the
        # first call to loop_body.
        nx = nn_ops.conv2d(
            input=x,
            filter=kernel,
            strides=[1, 1, 1, 1],
            padding="SAME",
            data_format="NHWC",
            name="conv2d")
        ni = math_ops.add(i, 1)
      return ni, nx

    ops.reset_default_graph()
    with session.Session() as sess, ops.device(default_device):
      # Get the initial id i, input x, and kernel.
      i, x, kernel = self._getInitVariables()
      sess.run(variables.global_variables_initializer())

      if static_unroll:
        for _ in xrange(steps):
          i, x = loop_body(i, x)
      else:
        i, x = control_flow_ops.while_loop(
            lambda i, _: i < steps,
            loop_body, [i, x],
            parallel_iterations=steps,
            swap_memory=True)

      r = math_ops.reduce_sum(x)
      dx, dk = gradients_impl.gradients(r, [x, kernel])
      # Use group to avoid fetching back results.
      r = control_flow_ops.group(dx, dk)

      for _ in xrange(3):
        # exclude warm up time
        sess.run(r)

      start_time = time.time()
      for _ in xrange(num_iters):
        sess.run(r)
      # Average wall time per iteration.
      return (time.time() - start_time) / num_iters

  def benchmarkWhileOpCrossDevicePlacement(self):
    iters = 10
    # Run loop body on GPU, but other ops on CPU.
    duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
    self.report_benchmark(
        name="while_op_cross_device", iters=iters, wall_time=duration)

  def benchmarkWhileOpSameDevicePlacement(self):
    iters = 10
    # Run all ops on the same GPU device.
    duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
    self.report_benchmark(
        name="while_op_same_device", iters=iters, wall_time=duration)

  def benchmarkWhileOpUnrollCrossDevicePlacement(self):
    iters = 10
    # Run loop body on GPU, but other ops on CPU.
    duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
    self.report_benchmark(
        name="unroll_cross_device_cpu", iters=iters, wall_time=duration)

  def benchmarkWhileOpUnrollSameDevicePlacement(self):
    iters = 10
    # Run all ops on GPU.
    duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
    self.report_benchmark(
        name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_c_api
class EagerTest(test.TestCase):
  """Eager-mode behavior of the control-flow ops."""

  def testCond(self):
    with context.eager_mode():
      pred = math_ops.less(1, 2)
      fn1 = lambda: [constant_op.constant(10)]
      fn2 = lambda: [constant_op.constant(20)]
      r = control_flow_ops.cond(pred, fn1, fn2)

      # A single-element branch result is unpacked from its list.
      self.assertAllEqual(r.numpy(), 10)
      self.assertFalse(isinstance(r, list))

  def testWhileLoop(self):
    with context.eager_mode():
      tensor = constant_op.constant([1, 2, 3, 4, 5])
      # NOTE(review): `isum` is defined elsewhere in this file; per the
      # expected values it presumably adds 45 (i.e. sums 0..9) to each
      # element via a while loop -- confirm against its definition.
      self.assertAllEqual(isum(tensor).numpy(),
                          [46, 47, 48, 49, 50])

  def testWhileLoopWithMaxIterations(self):
    with context.eager_mode():
      tensor = constant_op.constant([1, 2, 3, 4, 5])
      self.assertAllEqual(isum(tensor, maximum_iterations=3).numpy(),
                          [1+3, 2+3, 3+3, 4+3, 5+3])

  def testWhileWithMaximumIterationsAndSingleArgument(self):
    with context.eager_mode():
      tensor = constant_op.constant(0)
      r = control_flow_ops.while_loop(
          lambda i: i < 3,
          lambda i: i + 1,
          [tensor],
          maximum_iterations=1)
      # maximum_iterations caps the loop at a single step.
      self.assertEqual(1, r.numpy())

  def testWithDependencies(self):
    with context.eager_mode():
      t1 = constant_op.constant(1)
      t2 = constant_op.constant(2)
      # In eager mode with_dependencies is a pass-through of the output.
      t3 = control_flow_ops.with_dependencies(t1, t2)
      self.assertAllEqual(t2.numpy(), t3.numpy())

  def testTuple(self):
    with context.eager_mode():
      t1 = constant_op.constant(1)
      t2 = constant_op.constant(2)
      tup1, tup2 = control_flow_ops.tuple([t1, t2])
      self.assertAllEqual(t1.numpy(), tup1.numpy())
      self.assertAllEqual(t2.numpy(), tup2.numpy())

  def testCase(self):
    with context.eager_mode():
      x = constant_op.constant(1)
      y = constant_op.constant(2)
      z = constant_op.constant(3)
      f1 = lambda: constant_op.constant(17)
      f2 = lambda: constant_op.constant(23)
      f3 = lambda: constant_op.constant(-1)

      r1 = control_flow_ops.case([(x < y, f1), (x > z, f2)],
                                 default=f3, exclusive=True)
      self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
  # Run all test cases in this file via the TF test runner.
  test.main()
| {
"content_hash": "dc69c31aad065e64086e14a1023e830f",
"timestamp": "",
"source": "github",
"line_count": 3033,
"max_line_length": 112,
"avg_line_length": 36.15891856247939,
"alnum_prop": 0.6052156469408224,
"repo_name": "Kongsea/tensorflow",
"id": "35ae89ed33fc2c2f1dbce1ee7bd724555b4fb0a2",
"size": "110393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/control_flow_ops_py_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "198923"
},
{
"name": "C++",
"bytes": "29494349"
},
{
"name": "CMake",
"bytes": "644855"
},
{
"name": "Go",
"bytes": "976410"
},
{
"name": "Java",
"bytes": "409984"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38189"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "270658"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26227666"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373711"
}
],
"symlink_target": ""
} |
"""
XML utilities.
"""
import contextlib
import io
from xml.sax import saxutils
class XMLBuilder:
    """
    XML document builder. The code is purposely namespace ignorant: it's up to
    the user to supply appropriate `xmlns` attributes as needed.

    >>> xml = XMLBuilder()
    >>> with xml.within('root', xmlns='tag:talideon.com,2013:test'):
    ...     xml += 'Before'
    ...     with xml.within('leaf'):
    ...         xml += 'Within'
    ...     xml += 'After'
    ...     xml.tag('leaf', 'Another')
    >>> print(xml.as_string())
    <?xml version="1.0" encoding="utf-8"?>
    <root xmlns="tag:talideon.com,2013:test">Before<leaf>Within</leaf>After<leaf>Another</leaf></root>
    """

    def __init__(self, out=None, encoding="utf-8"):
        """
        `out` should be a file-like object to write the document to. If none
        is provided, a buffer is created.

        Note that if you provide your own, `as_string()` will return `None`
        as no other sensible value can be returned.
        """
        if out is None:
            self.buffer = io.StringIO()
            out = self.buffer
        else:
            self.buffer = None
        self.generator = saxutils.XMLGenerator(out, encoding)
        # Emits the XML declaration immediately.
        self.generator.startDocument()

    @contextlib.contextmanager
    def within(self, tag, **attrs):
        """
        Generates an element containing nested elements.
        """
        self.generator.startElement(tag, attrs)
        yield
        self.generator.endElement(tag)

    def tag(self, tag, *values, **attrs):
        """
        Generates a simple element.
        """
        self.generator.startElement(tag, attrs)
        for value in values:
            self.generator.characters(value)
        self.generator.endElement(tag)

    def __getattr__(self, tag):
        # Unknown attribute access becomes an element generator:
        # ``xml.leaf('text')`` is shorthand for ``xml.tag('leaf', 'text')``.
        return lambda *values, **attrs: self.tag(tag, *values, **attrs)

    def append(self, other):
        """
        Append the string to this document.
        """
        self.generator.characters(other)
        return self

    def as_string(self):
        """
        If using the built-in buffer, get its current contents.
        """
        return None if self.buffer is None else self.buffer.getvalue()

    def close(self):
        """
        If using the built-in buffer, clean it up.
        """
        if self.buffer is not None:
            self.buffer.close()
            self.buffer = None

    # Shortcuts.
    __iadd__ = append
    __str__ = as_string
| {
"content_hash": "31f139efe25307c303e5d522b74ef2c2",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 102,
"avg_line_length": 28.517241379310345,
"alnum_prop": 0.5711406690850463,
"repo_name": "kgaughan/adjunct",
"id": "a8910a3db6c4584d8bb48a0a7159654773d58409",
"size": "2481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adjunct/xmlutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2329"
},
{
"name": "Makefile",
"bytes": "571"
},
{
"name": "Python",
"bytes": "54724"
}
],
"symlink_target": ""
} |
from plutil import *
import sys
if __name__ == '__main__':
    # Expect exactly six positional arguments after the script name.
    if len(sys.argv) != 7:
        print('usage: %s [socialize_config] [api_config] [url] [secureurl] [key] [secret]' % sys.argv[0])
        sys.exit(1)
    socialize_config, api_config, url, securl, key, secret = sys.argv[1:]

    # Point the SDK config at the supplied REST endpoints.
    socialize_update = {
        'URLs': {
            'RestserverBaseURL': url,
            'SecureRestserverBaseURL': securl,
        }
    }
    try:
        update_plist(socialize_config, socialize_update)
    except IOError as e:
        print('Update failed for %s -- %s' % (socialize_config, e))

    # Store the API credentials in the API config plist.
    apiupdate = {
        'Socialize API info': {
            'key': key,
            'secret': secret,
        }
    }
    try:
        update_plist(api_config, apiupdate)
    except IOError as e:
        # BUG FIX: this failure is about api_config, but the message
        # previously reported socialize_config.
        print('Update failed for %s -- %s' % (api_config, e))
    # NOTE(review): the success message is printed even when an update above
    # failed — confirm whether a non-zero exit is wanted on failure.
    print('Successfully updated %s and %s' % (socialize_config, api_config))
| {
"content_hash": "9dca632134c55db0d9426392e829016d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 100,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.6082840236686391,
"repo_name": "dontfallisleep/socialize-sdk-ios",
"id": "490cb064ffff3c3d5b91f1d7526c7363e6e86a18",
"size": "868",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Scripts/socialize-config.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "248"
},
{
"name": "C",
"bytes": "9495"
},
{
"name": "Groff",
"bytes": "65678"
},
{
"name": "HTML",
"bytes": "1155"
},
{
"name": "Makefile",
"bytes": "10221"
},
{
"name": "Objective-C",
"bytes": "1869348"
},
{
"name": "Perl",
"bytes": "377988"
},
{
"name": "Python",
"bytes": "2952"
},
{
"name": "Ruby",
"bytes": "2922"
},
{
"name": "Shell",
"bytes": "19138"
}
],
"symlink_target": ""
} |
"""CounterGroup Handler that tracks moving interval percentiles over 4 periods
(1m, 10m, 1h, 1d).
"""
from taba.handlers.counter_group import CounterGroup
from taba.handlers.moving_interval_percentile import MovingIntervalPercentile
# Single handler instance shared by all four periods in the group below.
MOVING_INTERVAL_PERCENTILE = MovingIntervalPercentile()
class MovingIntervalPercentileGroup(CounterGroup):
    """CounterGroup Handler that tracks moving interval percentiles over 4
    periods (1m, 10m, 1h, 1d).
    """
    CURRENT_VERSION = 0

    # One shared handler per tracked period.
    COUNTERS = [MOVING_INTERVAL_PERCENTILE] * 4
    LABELS = ['1m', '10m', '1h', '1d']

    def __init__(self):
        self._server = None

    @property
    def server(self):
        return self._server

    @server.setter
    def server(self, value):
        # Propagate the server handle to the shared percentile handler.
        self._server = value
        MOVING_INTERVAL_PERCENTILE.server = value

    def NewState(self, client_id, name):
        # Per-period parameters: (width seconds, buckets, samples).
        # 1m @ 15s, 10m @ 2.5m, 1h @ 15m, 1d @ 6h
        period_args = [
            (15, 4, 64),
            (150, 4, 128),
            (900, 4, 128),
            (21600, 4, 128),
        ]
        states = [
            MOVING_INTERVAL_PERCENTILE.NewState(client_id, name, width, buckets, samples)
            for width, buckets, samples in period_args]
        return CounterGroup.NewState(self, client_id, name, states)

    def Upgrade(self, group_state, version):
        """See base class definition."""
        if version != 0:
            raise ValueError('Unknown version %s' % version)
        return group_state
| {
"content_hash": "af56a7e55e5005b65bb4a2ea4f736571",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 30.84,
"alnum_prop": 0.6880674448767834,
"repo_name": "tellapart/taba",
"id": "9bed43cf57069e90f77c88500d5d233f9a6590f0",
"size": "2122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/taba/handlers/moving_interval_percentile_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "540390"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the notes app: creates Folder and Note.

    Note.folder is nullable with on_delete=SET_NULL, so notes survive the
    deletion of their folder; notes are ordered newest-first by default.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Folder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(blank=True, default='', max_length=1024, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(blank=True, default='', max_length=1024, null=True)),
                ('content', models.TextField(blank=True, default='', null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('folder', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='notes.Folder')),
            ],
            options={
                'ordering': ['-updated_at'],
            },
        ),
    ]
| {
"content_hash": "6dc6166c3b0e1c6d64ec64a8e14b7f54",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 122,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.5531609195402298,
"repo_name": "asommer70/strangway",
"id": "c3520ef15b290938a802cd4f56bd5489d36734ea",
"size": "1441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notes/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23895"
},
{
"name": "HTML",
"bytes": "9786"
},
{
"name": "JavaScript",
"bytes": "1391"
},
{
"name": "Python",
"bytes": "19368"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: change Page.orderid from decimal_places=2 to 1
    (max_digits stays 2); backwards() restores decimal_places=2.
    """

    def forwards(self, orm):
        # Changing field 'Page.orderid'
        db.alter_column('pages_page', 'orderid', self.gf('django.db.models.fields.DecimalField')(unique=True, max_digits=2, decimal_places=1))

    def backwards(self, orm):
        # Changing field 'Page.orderid'
        db.alter_column('pages_page', 'orderid', self.gf('django.db.models.fields.DecimalField')(unique=True, max_digits=2, decimal_places=2))

    # Frozen model definitions South exposes through the ``orm`` argument.
    models = {
        'pages.page': {
            'Meta': {'object_name': 'Page'},
            'activated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'body': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'display': ('django.db.models.fields.CharField', [], {'default': "'body'", 'max_length': '20'}),
            'icon': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'orderid': ('django.db.models.fields.DecimalField', [], {'unique': 'True', 'max_digits': '2', 'decimal_places': '1'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }
complete_apps = ['pages'] | {
"content_hash": "a3bbf6a34b06d550436574e7e6b2d6c9",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 144,
"avg_line_length": 52.27777777777778,
"alnum_prop": 0.5807651434643996,
"repo_name": "LambdaCast/LambdaCast",
"id": "654de6edc03edb44ebb25d6e299554dbbaba532b",
"size": "1906",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pages/migrations/0003_auto__chg_field_page_orderid.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "58222"
},
{
"name": "HTML",
"bytes": "60231"
},
{
"name": "JavaScript",
"bytes": "122667"
},
{
"name": "Python",
"bytes": "928261"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import logging
import operator
from mopidy import compat, exceptions, models
from mopidy.compat import urllib
from mopidy.internal import deprecation, validation
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def _backend_error_handling(backend, reraise=None):
    """Log and swallow errors raised while talking to a backend.

    A ValidationError is logged as bad data from the backend. Any other
    exception is logged with a traceback, unless it is an instance of
    *reraise*, in which case it propagates to the caller.
    """
    try:
        yield
    except exceptions.ValidationError as exc:
        logger.error('%s backend returned bad data: %s',
                     backend.actor_ref.actor_class.__name__, exc)
    except Exception as exc:
        if reraise is not None and isinstance(exc, reraise):
            raise
        logger.exception('%s backend caused an exception.',
                         backend.actor_ref.actor_class.__name__)
class LibraryController(object):
    """Fans library requests out to every backend with library support.

    Each public method routes the request by URI scheme (or to all library
    backends when no URIs are given), validates the returned data, and merges
    the results. A misbehaving backend is logged and skipped via
    _backend_error_handling rather than failing the whole call.
    """

    pykka_traversable = True

    def __init__(self, backends, core):
        self.backends = backends
        self.core = core

    def _get_backend(self, uri):
        # Resolve a URI to the backend registered for its scheme (or None).
        uri_scheme = urllib.parse.urlparse(uri).scheme
        return self.backends.with_library.get(uri_scheme, None)

    def _get_backends_to_uris(self, uris):
        # Group URIs by owning backend; URIs without a matching backend are
        # silently dropped. With no URIs, map every library backend to None,
        # which downstream code treats as "no URI restriction".
        if uris:
            backends_to_uris = collections.defaultdict(list)
            for uri in uris:
                backend = self._get_backend(uri)
                if backend is not None:
                    backends_to_uris[backend].append(uri)
        else:
            backends_to_uris = dict([
                (b, None) for b in self.backends.with_library.values()])
        return backends_to_uris

    def browse(self, uri):
        """
        Browse directories and tracks at the given ``uri``.

        ``uri`` is a string which represents some directory belonging to a
        backend. To get the intial root directories for backends pass
        :class:`None` as the URI.

        Returns a list of :class:`mopidy.models.Ref` objects for the
        directories and tracks at the given ``uri``.

        The :class:`~mopidy.models.Ref` objects representing tracks keep the
        track's original URI. A matching pair of objects can look like this::

            Track(uri='dummy:/foo.mp3', name='foo', artists=..., album=...)
            Ref.track(uri='dummy:/foo.mp3', name='foo')

        The :class:`~mopidy.models.Ref` objects representing directories have
        backend specific URIs. These are opaque values, so no one but the
        backend that created them should try and derive any meaning from them.
        The only valid exception to this is checking the scheme, as it is used
        to route browse requests to the correct backend.

        For example, the dummy library's ``/bar`` directory could be returned
        like this::

            Ref.directory(uri='dummy:directory:/bar', name='bar')

        :param string uri: URI to browse
        :rtype: list of :class:`mopidy.models.Ref`

        .. versionadded:: 0.18
        """
        if uri is None:
            return self._roots()
        elif not uri.strip():
            # Empty/whitespace-only URIs browse to nothing rather than error.
            return []
        validation.check_uri(uri)
        return self._browse(uri)

    def _roots(self):
        # Collect each browse-capable backend's root directory, sorted by name.
        directories = set()
        backends = self.backends.with_library_browse.values()
        futures = {b: b.library.root_directory for b in backends}
        for backend, future in futures.items():
            with _backend_error_handling(backend):
                root = future.get()
                validation.check_instance(root, models.Ref)
                directories.add(root)
        return sorted(directories, key=operator.attrgetter('name'))

    def _browse(self, uri):
        # Route to the single backend owning this URI's scheme.
        scheme = urllib.parse.urlparse(uri).scheme
        backend = self.backends.with_library_browse.get(scheme)
        if not backend:
            return []
        with _backend_error_handling(backend):
            result = backend.library.browse(uri).get()
            validation.check_instances(result, models.Ref)
            return result
        # Only reached when the backend raised and the context manager above
        # swallowed the error.
        return []

    def get_distinct(self, field, query=None):
        """
        List distinct values for a given field from the library.

        This has mainly been added to support the list commands the MPD
        protocol supports in a more sane fashion. Other frontends are not
        recommended to use this method.

        :param string field: One of ``track``, ``artist``, ``albumartist``,
            ``album``, ``composer``, ``performer``, ``date`` or ``genre``.
        :param dict query: Query to use for limiting results, see
            :meth:`search` for details about the query format.
        :rtype: set of values corresponding to the requested field type.

        .. versionadded:: 1.0
        """
        validation.check_choice(field, validation.DISTINCT_FIELDS)
        query is None or validation.check_query(query)  # TODO: normalize?
        result = set()
        futures = {b: b.library.get_distinct(field, query)
                   for b in self.backends.with_library.values()}
        for backend, future in futures.items():
            with _backend_error_handling(backend):
                values = future.get()
                if values is not None:
                    validation.check_instances(values, compat.text_type)
                    result.update(values)
        return result

    def get_images(self, uris):
        """Lookup the images for the given URIs

        Backends can use this to return image URIs for any URI they know about
        be it tracks, albums, playlists. The lookup result is a dictionary
        mapping the provided URIs to lists of images.

        Unknown URIs or URIs the corresponding backend couldn't find anything
        for will simply return an empty list for that URI.

        :param uris: list of URIs to find images for
        :type uris: list of string
        :rtype: {uri: tuple of :class:`mopidy.models.Image`}

        .. versionadded:: 1.0
        """
        validation.check_uris(uris)
        futures = {
            backend: backend.library.get_images(backend_uris)
            for (backend, backend_uris)
            in self._get_backends_to_uris(uris).items() if backend_uris}
        # Every requested URI gets at least an empty tuple in the result.
        results = {uri: tuple() for uri in uris}
        for backend, future in futures.items():
            with _backend_error_handling(backend):
                # NOTE(review): future.get() is called three times below —
                # presumably the future caches its result; confirm, or bind
                # it to a local once.
                if future.get() is None:
                    continue
                validation.check_instance(future.get(), collections.Mapping)
                for uri, images in future.get().items():
                    if uri not in uris:
                        raise exceptions.ValidationError(
                            'Got unknown image URI: %s' % uri)
                    validation.check_instances(images, models.Image)
                    results[uri] += tuple(images)
        return results

    def find_exact(self, query=None, uris=None, **kwargs):
        """Search the library for tracks where ``field`` is ``values``.

        .. deprecated:: 1.0
            Use :meth:`search` with ``exact`` set.
        """
        deprecation.warn('core.library.find_exact')
        return self.search(query=query, uris=uris, exact=True, **kwargs)

    def lookup(self, uri=None, uris=None):
        """
        Lookup the given URIs.

        If the URI expands to multiple tracks, the returned list will contain
        them all.

        :param uri: track URI
        :type uri: string or :class:`None`
        :param uris: track URIs
        :type uris: list of string or :class:`None`
        :rtype: list of :class:`mopidy.models.Track` if uri was set or
            {uri: list of :class:`mopidy.models.Track`} if uris was set.

        .. versionadded:: 1.0
            The ``uris`` argument.

        .. deprecated:: 1.0
            The ``uri`` argument. Use ``uris`` instead.
        """
        # Exactly one of the two mutually exclusive arguments must be given.
        if sum(o is not None for o in [uri, uris]) != 1:
            raise ValueError('Exactly one of "uri" or "uris" must be set')

        uris is None or validation.check_uris(uris)
        uri is None or validation.check_uri(uri)

        if uri:
            deprecation.warn('core.library.lookup:uri_arg')

        # Normalize the deprecated single-URI form onto the list code path.
        if uri is not None:
            uris = [uri]

        futures = {}
        results = {u: [] for u in uris}

        # TODO: lookup(uris) to backend APIs
        for backend, backend_uris in self._get_backends_to_uris(uris).items():
            for u in backend_uris:
                futures[(backend, u)] = backend.library.lookup(u)

        for (backend, u), future in futures.items():
            with _backend_error_handling(backend):
                result = future.get()
                if result is not None:
                    validation.check_instances(result, models.Track)
                    # TODO Consider making Track.uri field mandatory, and
                    # then remove this filtering of tracks without URIs.
                    results[u] = [r for r in result if r.uri]

        if uri:
            return results[uri]
        return results

    def refresh(self, uri=None):
        """
        Refresh library. Limit to URI and below if an URI is given.

        :param uri: directory or track URI
        :type uri: string
        """
        uri is None or validation.check_uri(uri)

        futures = {}
        backends = {}
        uri_scheme = urllib.parse.urlparse(uri).scheme if uri else None

        # A backend may be registered under several schemes; refresh it once
        # if any of its schemes matches (or if no URI was given).
        for backend_scheme, backend in self.backends.with_library.items():
            backends.setdefault(backend, set()).add(backend_scheme)

        for backend, backend_schemes in backends.items():
            if uri_scheme is None or uri_scheme in backend_schemes:
                futures[backend] = backend.library.refresh(uri)

        for backend, future in futures.items():
            with _backend_error_handling(backend):
                future.get()

    def search(self, query=None, uris=None, exact=False, **kwargs):
        """
        Search the library for tracks where ``field`` contains ``values``.

        ``field`` can be one of ``uri``, ``track_name``, ``album``, ``artist``,
        ``albumartist``, ``composer``, ``performer``, ``track_no``, ``genre``,
        ``date``, ``comment`` or ``any``.

        If ``uris`` is given, the search is limited to results from within the
        URI roots. For example passing ``uris=['file:']`` will limit the search
        to the local backend.

        Examples::

            # Returns results matching 'a' in any backend
            search({'any': ['a']})

            # Returns results matching artist 'xyz' in any backend
            search({'artist': ['xyz']})

            # Returns results matching 'a' and 'b' and artist 'xyz' in any
            # backend
            search({'any': ['a', 'b'], 'artist': ['xyz']})

            # Returns results matching 'a' if within the given URI roots
            # "file:///media/music" and "spotify:"
            search({'any': ['a']}, uris=['file:///media/music', 'spotify:'])

            # Returns results matching artist 'xyz' and 'abc' in any backend
            search({'artist': ['xyz', 'abc']})

        :param query: one or more queries to search for
        :type query: dict
        :param uris: zero or more URI roots to limit the search to
        :type uris: list of string or :class:`None`
        :param exact: if the search should use exact matching
        :type exact: :class:`bool`
        :rtype: list of :class:`mopidy.models.SearchResult`

        .. versionadded:: 1.0
            The ``exact`` keyword argument, which replaces :meth:`find_exact`.

        .. deprecated:: 1.0
            Previously, if the query was empty, and the backend could support
            it, all available tracks were returned. This has not changed, but
            it is strongly discouraged. No new code should rely on this
            behavior.

        .. deprecated:: 1.1
            Providing the search query via ``kwargs`` is no longer supported.
        """
        query = _normalize_query(query or kwargs)

        uris is None or validation.check_uris(uris)
        query is None or validation.check_query(query)
        validation.check_boolean(exact)

        if kwargs:
            deprecation.warn('core.library.search:kwargs_query')

        if not query:
            deprecation.warn('core.library.search:empty_query')

        futures = {}
        for backend, backend_uris in self._get_backends_to_uris(uris).items():
            futures[backend] = backend.library.search(
                query=query, uris=backend_uris, exact=exact)

        # Some of our tests check for LookupError to catch bad queries. This is
        # silly and should be replaced with query validation before passing it
        # to the backends.
        reraise = (TypeError, LookupError)

        results = []
        for backend, future in futures.items():
            try:
                with _backend_error_handling(backend, reraise=reraise):
                    result = future.get()
                    if result is not None:
                        validation.check_instance(result, models.SearchResult)
                        results.append(result)
            except TypeError:
                # The backend's search() signature predates the ``exact``
                # keyword argument.
                backend_name = backend.actor_ref.actor_class.__name__
                logger.warning(
                    '%s does not implement library.search() with "exact" '
                    'support. Please upgrade it.', backend_name)

        return results
def _normalize_query(query):
    """Coerce bare string values in *query* to single-item lists, in place.

    Logs a warning when a broken client sent strings instead of lists, and
    another when the query is empty. Returns the (possibly mutated) query.
    """
    had_string_values = False
    # TODO: this breaks if query is not a dictionary like object...
    for field, values in query.items():
        if isinstance(values, compat.string_types):
            had_string_values = True
            query[field] = [values]
    if had_string_values:
        logger.warning(
            'A client or frontend made a broken library search. Values in '
            'queries must be lists of strings, not a string. Please check what'
            ' sent this query and file a bug. Query: %s', query)
    if not query:
        logger.warning(
            'A client or frontend made a library search with an empty query. '
            'This is strongly discouraged. Please check what sent this query '
            'and file a bug.')
    return query
| {
"content_hash": "84132d7c4a3deceb05366b2cb767ef38",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 79,
"avg_line_length": 37.681697612732094,
"alnum_prop": 0.587146276221315,
"repo_name": "vrs01/mopidy",
"id": "04fe0a7e5ba42c0394cb90c0f6311f7dac358e4c",
"size": "14206",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "mopidy/core/library.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "610"
},
{
"name": "Groff",
"bytes": "573"
},
{
"name": "HTML",
"bytes": "805"
},
{
"name": "JavaScript",
"bytes": "82060"
},
{
"name": "Python",
"bytes": "1192583"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
} |
import unittest
from maec.utils.nsparser import MAEC_NAMESPACES
class NSParserTests(unittest.TestCase):
    """Tests for the MAEC namespace list exported by the nsparser module."""

    def test_import(self):
        """Verify that the namespace list was imported successfully."""
        namespaces = MAEC_NAMESPACES
        self.assertTrue(namespaces)
        self.assertEqual(3, len(namespaces))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "6e516394b972f056e53fd0045c45a771",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 71,
"avg_line_length": 23.133333333333333,
"alnum_prop": 0.6858789625360231,
"repo_name": "MAECProject/python-maec",
"id": "6215e129b4dba51bfe620a99cf894052cc59e77d",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maec/test/utils/nsparser_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "862178"
}
],
"symlink_target": ""
} |
import numpy as np
from STObject import STObject
from hagelslag.util.munkres import Munkres
class ObjectMatcher(object):
    """
    ObjectMatcher calculates distances between two sets of objects and determines the optimal object assignments
    based on the Hungarian object matching algorithm. ObjectMatcher supports the use of the weighted average of
    multiple cost functions to determine the distance between objects. Upper limits to each distance component are used
    to exclude the matching of objects that are too far apart.
    """
    def __init__(self, cost_function_components, weights, max_values):
        """
        :param cost_function_components: list of component functions with
            signature f(item_a, time_a, item_b, time_b, max_value), each
            returning a normalized distance.
        :param weights: numpy array of component weights; normalized in place
            to sum to 1 if it does not already.
        :param max_values: sequence of upper limits, one per component.
        """
        self.cost_function_components = cost_function_components
        self.weights = weights
        self.max_values = max_values
        if self.weights.sum() != 1:
            self.weights /= float(self.weights.sum())

    def match_objects(self, set_a, set_b, time_a, time_b):
        """
        Match two sets of objects at particular times.

        :param set_a: list of STObjects
        :param set_b: list of STObjects
        :param time_a: time at which set_a is being evaluated for matching
        :param time_b: time at which set_b is being evaluated for matching
        :return: list of tuples containing (set_a index, set_b index) for each match
        """
        costs = self.cost_matrix(set_a, set_b, time_a, time_b) * 100
        min_row_costs = costs.min(axis=1)
        min_col_costs = costs.min(axis=0)
        # Rows/columns whose cheapest pairing already hit the cap (>= 100)
        # can never produce a valid match, so drop them before matching.
        good_rows = np.where(min_row_costs < 100)[0]
        good_cols = np.where(min_col_costs < 100)[0]
        assignments = []
        if len(good_rows) > 0 and len(good_cols) > 0:
            munk = Munkres()
            # BUG FIX: indexing an array with the *list* returned by
            # np.meshgrid is rejected by modern NumPy; np.ix_ builds the
            # equivalent open mesh and is the supported cross-product index.
            sub_costs = costs[np.ix_(good_rows, good_cols)]
            initial_assignments = munk.compute(sub_costs.tolist())
            initial_assignments = [(good_rows[x[0]], good_cols[x[1]])
                                   for x in initial_assignments]
            for a in initial_assignments:
                if costs[a[0], a[1]] < 100:
                    assignments.append(a)
        return assignments

    def cost_matrix(self, set_a, set_b, time_a, time_b):
        """Return the pairwise cost matrix with shape (len(set_a), len(set_b))."""
        costs = np.zeros((len(set_a), len(set_b)))
        for a, item_a in enumerate(set_a):
            for b, item_b in enumerate(set_b):
                costs[a, b] = self.total_cost_function(item_a, item_b, time_a, time_b)
        return costs

    def total_cost_function(self, item_a, item_b, time_a, time_b):
        """Weighted sum of all normalized component distances for one pair."""
        distances = np.zeros(len(self.weights))
        for c, component in enumerate(self.cost_function_components):
            distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])
        return np.sum(self.weights * distances)
class TrackMatcher(object):
    """
    TrackMatcher matches whole tracks using the Hungarian algorithm with a
    weighted combination of track-level cost functions, mirroring
    ObjectMatcher but with components of signature f(item_a, item_b, max_value).
    """
    def __init__(self, cost_function_components, weights, max_values):
        self.cost_function_components = cost_function_components
        # Normalize the weights so they sum to 1.
        self.weights = weights if weights.sum() == 1 else weights / weights.sum()
        self.max_values = max_values

    def match_tracks(self, set_a, set_b):
        """Return (set_a index, set_b index) assignment tuples for matched tracks."""
        costs = self.track_cost_matrix(set_a, set_b) * 100
        min_row_costs = costs.min(axis=1)
        min_col_costs = costs.min(axis=0)
        # Rows/columns whose cheapest pairing already hit the cap (>= 100)
        # can never produce a valid match, so drop them before matching.
        good_rows = np.where(min_row_costs < 100)[0]
        good_cols = np.where(min_col_costs < 100)[0]
        assignments = []
        if len(good_rows) > 0 and len(good_cols) > 0:
            munk = Munkres()
            # BUG FIX: indexing an array with the *list* returned by
            # np.meshgrid is rejected by modern NumPy; np.ix_ builds the
            # equivalent open mesh and is the supported cross-product index.
            sub_costs = costs[np.ix_(good_rows, good_cols)]
            initial_assignments = munk.compute(sub_costs.tolist())
            initial_assignments = [(good_rows[x[0]], good_cols[x[1]])
                                   for x in initial_assignments]
            for a in initial_assignments:
                if costs[a[0], a[1]] < 100:
                    assignments.append(a)
        return assignments

    def track_cost_matrix(self, set_a, set_b):
        """Return the pairwise cost matrix with shape (len(set_a), len(set_b))."""
        costs = np.zeros((len(set_a), len(set_b)))
        for a, item_a in enumerate(set_a):
            for b, item_b in enumerate(set_b):
                costs[a, b] = self.track_cost_function(item_a, item_b)
        return costs

    def track_cost_function(self, item_a, item_b):
        """Weighted sum of all normalized component distances for one pair."""
        distances = np.zeros(len(self.weights))
        for c, component in enumerate(self.cost_function_components):
            distances[c] = component(item_a, item_b, self.max_values[c])
        return np.sum(self.weights * distances)
def centroid_distance(item_a, time_a, item_b, time_b, max_value):
    """
    Euclidean distance between the centroids of item_a and item_b, capped at
    max_value and scaled to [0, 1].

    :param item_a:
    :param time_a:
    :param item_b:
    :param time_b:
    :param max_value:
    :return:
    """
    x_a, y_a = item_a.center_of_mass(time_a)
    x_b, y_b = item_b.center_of_mass(time_b)
    separation = np.sqrt((x_a - x_b) ** 2 + (y_a - y_b) ** 2)
    return np.minimum(separation, max_value) / float(max_value)
def shifted_centroid_distance(item_a, time_a, item_b, time_b, max_value):
    """
    Centroid distance after shifting the later object's centroid back by its
    motion vector (u, v), capped at max_value and scaled to [0, 1].
    """
    x_a, y_a = item_a.center_of_mass(time_a)
    x_b, y_b = item_b.center_of_mass(time_b)
    # Undo one step of motion on whichever object is at the later time.
    if time_a < time_b:
        x_b, y_b = x_b - item_b.u, y_b - item_b.v
    else:
        x_a, y_a = x_a - item_a.u, y_a - item_a.v
    separation = np.sqrt((x_a - x_b) ** 2 + (y_a - y_b) ** 2)
    return np.minimum(separation, max_value) / float(max_value)
def closest_distance(item_a, time_a, item_b, time_b, max_value):
    """
    Euclidean distance between the pixels in item_a and item_b closest to each other.
    Capped at max_value and scaled to [0, 1].
    """
    raw = item_a.closest_distance(time_a, item_b, time_b)
    return np.minimum(raw, max_value) / float(max_value)
def percentile_distance(item_a, time_a, item_b, time_b, max_value, percentile=2):
    """
    Percentile-based distance between the two objects, capped at max_value
    and scaled to [0, 1].
    """
    raw = item_a.percentile_distance(time_a, item_b, time_b, percentile)
    return np.minimum(raw, max_value) / float(max_value)
def ellipse_distance(item_a, time_a, item_b, time_b, max_value):
    """
    Calculate differences in the properties of ellipses fitted to each object.
    """
    angles = np.array([0, np.pi])
    ends_a = item_a.get_ellipse_model(time_a).predict_xy(angles)
    ends_b = item_b.get_ellipse_model(time_b).predict_xy(angles)
    dx = ends_a[:, 0:1] - ends_b[:, 0:1].T
    dy = ends_a[:, 1:] - ends_b[:, 1:].T
    distances = np.sqrt(dx ** 2 + dy ** 2)
    return np.minimum(distances[0, 1], max_value) / float(max_value)
def nonoverlap(item_a, time_a, item_b, time_b, max_value):
    """
    Percentage of pixels in each object that do not overlap with the other object
    """
    overlap_fraction = item_a.count_overlap(time_a, item_b, time_b)
    return np.minimum(1 - overlap_fraction, max_value)
def max_intensity(item_a, time_a, item_b, time_b, max_value):
    """
    RMS Difference in intensities.
    """
    # sqrt((a - b)**2) is just the absolute difference for scalars.
    diff = abs(item_a.max_intensity(time_a) - item_b.max_intensity(time_b))
    return np.minimum(diff, max_value) / float(max_value)
def area_difference(item_a, time_a, item_b, time_b, max_value):
    """
    RMS Difference in object areas.
    """
    # sqrt((a - b)**2) is just the absolute difference for scalars.
    diff = abs(item_a.size(time_a) - item_b.size(time_b))
    return np.minimum(diff, max_value) / float(max_value)
def mean_minimum_centroid_distance(item_a, item_b, max_value):
    """
    RMS difference in the minimum distances from the centroids of one track to the centroids of another track
    """
    centroids_a = np.array([item_a.center_of_mass(t) for t in item_a.times])
    centroids_b = np.array([item_b.center_of_mass(t) for t in item_b.times])
    # Squared distances between every centroid pair, shape (len_a, len_b).
    sq_dists = ((centroids_a[:, np.newaxis, :] -
                 centroids_b[np.newaxis, :, :]) ** 2).sum(axis=2)
    combined = sq_dists.min(axis=0).mean() + sq_dists.min(axis=1).mean()
    return np.sqrt(combined) / float(max_value)
def mean_min_time_distance(item_a, item_b, max_value):
    """
    Combined mean-minimum squared time offset between the two tracks' time
    steps (square-rooted), scaled by max_value.
    """
    times_a = item_a.times.reshape((item_a.times.size, 1))
    times_b = item_b.times.reshape((1, item_b.times.size))
    sq_offsets = (times_a - times_b) ** 2
    combined = sq_offsets.min(axis=0).mean() + sq_offsets.min(axis=1).mean()
    return np.sqrt(combined) / float(max_value)
def duration_distance(item_a, item_b, max_value):
    """
    Absolute difference in track durations (number of time steps), scaled by
    max_value.
    """
    return abs(item_a.times.size - item_b.times.size) / float(max_value)
| {
"content_hash": "eb0f292fabe13818b0e2c55d1fdc40c0",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 119,
"avg_line_length": 40.390243902439025,
"alnum_prop": 0.6234299516908213,
"repo_name": "djgagne/hagelslag-unidata",
"id": "cfae3f002032ab8f3bc29f48ea9f22d0b67c19ce",
"size": "8280",
"binary": false,
"copies": "1",
"ref": "refs/heads/unidata_workshop_2015",
"path": "hagelslag/processing/ObjectMatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "172294"
}
],
"symlink_target": ""
} |
#
import unittest
import os
import sys
import glob
import unittest
def getSuite(auto=True):
    """Collect test suites from every sibling module that defines getSuite().

    Modules named __init__.py or testAll.py are skipped; modules that fail
    to import are reported and skipped.
    """
    test_dir = os.path.dirname(__file__)
    python_files = glob.glob(os.path.join(test_dir, "*.py"))
    sys.path.insert(0, test_dir)
    suite = unittest.TestSuite()
    for fname in python_files:
        base = os.path.basename(fname)
        if base in ["__init__.py", "testAll.py"]:
            continue
        mod_name = os.path.splitext(base)[0]
        try:
            module = __import__(mod_name)
        except ImportError:
            print("Failed to import %s" % fname)
            continue
        if hasattr(module, "getSuite"):
            suite.addTest(module.getSuite(auto))
    return suite
def main(auto=True):
    """Run the collected suite with a verbose text runner; return the result."""
    runner = unittest.TextTestRunner(verbosity=2)
    return runner.run(getSuite(auto=auto))
if __name__ == '__main__':
    # Any command-line argument switches off automatic mode.
    if len(sys.argv) > 1:
        auto = False
    else:
        auto = True
    # Exit status reflects the outcome so callers/CI can detect failures.
    if main(auto).wasSuccessful():
        print("Test suite was successful")
        sys.exit(0)
    else:
        print("Test suite failed")
        sys.exit(1)
| {
"content_hash": "369f797fd61b332002d76f83b8cb4dcb",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 76,
"avg_line_length": 28.13157894736842,
"alnum_prop": 0.5958840037418148,
"repo_name": "vasole/fisx",
"id": "91b7c59e76c8b2c16414ac2e7361e9466f2b1263",
"size": "2459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/fisx/tests/testAll.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "490407"
},
{
"name": "CSS",
"bytes": "32953"
},
{
"name": "Cython",
"bytes": "106933"
},
{
"name": "HTML",
"bytes": "1365037"
},
{
"name": "JavaScript",
"bytes": "129540"
},
{
"name": "Makefile",
"bytes": "2036"
},
{
"name": "Python",
"bytes": "84065"
},
{
"name": "Shell",
"bytes": "7137"
}
],
"symlink_target": ""
} |
import logging
from tracker.configs.celeryconfig import CELERY_TRACKER_PLUGINS
from tracker.plugins.fluent import FluentPlugin
try:
from .utils import MockStorage
except (ImportError, ValueError):
from tests.utils import MockStorage
# Fluent plugin settings (tag/host/port) taken from the tracker's Celery config.
config = CELERY_TRACKER_PLUGINS["fluent"]
def get_kwargs():
    """Build the keyword arguments needed to construct a FluentPlugin."""
    kwargs = {
        "logger": logging.getLogger("test_fluent"),
        "tag": config["tag"],
        "storage": MockStorage(),
        "host": config["host"],
        "port": config["port"],
    }
    return kwargs
def test_event():
    """pop_event() should produce a dict payload."""
    plugin = FluentPlugin(**get_kwargs())
    event = plugin.pop_event()
    assert isinstance(event, dict)
def test_send():
    """running() should hand a dict payload to the sender's send()."""
    def assert_sender(payload):
        assert isinstance(payload, dict)

    plugin = FluentPlugin(**get_kwargs())
    # Replace the real sender so nothing leaves the process.
    plugin.sender.send = assert_sender
    plugin.running()
| {
"content_hash": "39146e26c7dab241aef7d00900f5a119",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 63,
"avg_line_length": 22.416666666666668,
"alnum_prop": 0.6530359355638166,
"repo_name": "ikeikeikeike/celery-tracker",
"id": "20bce9e9a5ad563390a6bf9d3051cff8dac52c48",
"size": "807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fluent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63975"
}
],
"symlink_target": ""
} |
"""This module is used for CLI module initialization and execution"""
import os
import inspect
from lightbulb.core.utils.ipc import Pipe
from threading import Thread
import imp
import sys
# Module metadata consumed by the framework's CLI.
# Option tuples appear to be (name, default, required, description, weight);
# NOTE(review): the MODULE_A/MODULE_B descriptions look copy-pasted from a
# different module — confirm against the framework's option schema.
META = {
    'author': 'George Argyros, Ioannis Stais',
    'name': 'MANAGE',
    'description': 'Executes Core Modules',
    'type': 'CORE',
    'options': [
        ('MODULE_A', None, True, 'File to save learned filter if no bypass is found', 5),
        ('MODULE_B', None, False, 'Handler for membership query function', 0),
    ],
    'comments': ['']
}
# Make the installed lightbulb package's modules and utils importable by name.
sys.path.insert(1, imp.find_module('lightbulb')[1]+'/modules')
sys.path.insert(1, imp.find_module('lightbulb')[1]+'/core/utils/')
def operate_learn(module, configuration):
    """
    Run a single module to completion and collect its output.

    Args:
        module (class): The module class to be instantiated
        configuration (dict): The module options

    Returns:
        list: [(name, bypass)] followed by the module's stats tuples
    """
    instance = module(configuration)
    instance.learn()
    # getresult() yields the (name, bypass) pair that heads the result list.
    results = [instance.getresult()]
    return results + instance.stats()
def operate_diff_part(module, configuration, shared_memory, cross):
    """
    Run one half of a differential-testing pair.

    Args:
        module (class): The module class to be instantiated
        configuration (dict): The module options
        shared_memory (list): A shared memory for intermodule communication
        cross (int): The identifier for this instance (1 publishes results)

    Returns:
        None
    """
    instance = module(configuration, shared_memory, cross)
    if cross != 1:
        # Secondary instance: register itself in slot 5 and learn; the
        # primary instance is responsible for collecting the results.
        shared_memory[5] = instance
        instance.learn()
        return
    # Primary instance: register in slot 4 and publish results in slot 6.
    shared_memory[4] = instance
    instance.learn()
    shared_memory[6] = [instance.getresult()] + instance.stats()
def operate_diff(module_class_A, configuration_A, module_class_B, configuration_B):
    """
    Initiates the execution of two modules in parallel.

    Args:
        module_class_A (class): The module class to be initiated
        configuration_A (dict): The module options
        module_class_B (class): The module class to be initiated
        configuration_B (dict): The module options

    Returns:
        list: The results/stats list published into shared_memory[6] by
        instance A (see operate_diff_part).
    """
    target_a_pipe, target_b_pipe = Pipe()
    # Slots: [pipe_a, pipe_b, object_a, object_b, shared_a, shared_b, result]
    shared_memory = [target_a_pipe, target_b_pipe, None, None, None, None, []]
    target_a = Thread(target=operate_diff_part,
                      args=(module_class_A, configuration_A, shared_memory, 1))
    target_a.daemon = True  # daemon attribute replaces deprecated setDaemon()
    target_a.start()
    target_b = Thread(target=operate_diff_part,
                      args=(module_class_B, configuration_B, shared_memory, 2))
    target_b.daemon = True
    target_b.start()
    while True:
        # Poll instance A with a generous timeout; it publishes the results.
        target_a.join(600)
        # is_alive(): isAlive() was removed in Python 3.9; this spelling
        # also works on Python 2.6+.
        if not target_a.is_alive():
            target_a = None
            break
    # print() calls: the original used Python 2 print statements, which are a
    # SyntaxError on Python 3; the parenthesized form runs on both.
    print('thread one finished')
    # End Cross _check
    target_b.join(0)
    target_b = None
    print('thread two finished')
    return shared_memory[6]
def manage(module_name, configuration):
    """
    This function manages the module initialization and execution.

    Args:
        module_name (str): Importable name of the module to run
        configuration (dict): The module options

    Returns:
        list: A list with the results and stats as tuples
    """
    module = __import__(module_name)
    wanted = ('Module', 'Module_A', 'Module_B')
    classmembers = [member for member in inspect.getmembers(module, inspect.isclass)
                    if member[0] in wanted]
    if len(classmembers) == 1:
        # Single module: plain learning run.
        return operate_learn(classmembers[0][1], configuration)
    # Two modules: run them differentially, with Module_A always first.
    first, second = classmembers[0], classmembers[1]
    if first[0] == "Module_A":
        return operate_diff(first[1], configuration, second[1], configuration)
    return operate_diff(second[1], configuration, first[1], configuration)
| {
"content_hash": "3c6d3ab2e1a6a26bde5da184c94d0cdd",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 101,
"avg_line_length": 32.669291338582674,
"alnum_prop": 0.6331646179802362,
"repo_name": "lightbulb-framework/lightbulb-framework",
"id": "f7ad653bea7c7873bdf4788e10185539fe9eff53",
"size": "4149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lightbulb/core/operate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46517"
},
{
"name": "Makefile",
"bytes": "149"
},
{
"name": "Python",
"bytes": "790078"
},
{
"name": "Shell",
"bytes": "4829"
},
{
"name": "Yacc",
"bytes": "267605"
}
],
"symlink_target": ""
} |
from .bloc import *
import pygame, configparser
from pygame.locals import *
class Tile:
    """A single map cell ("tile"), holding one bloc per layer plus per-layer
    walkability flags (originally documented in French)."""
    def __init__(self, nombreCouches):
        # _bloc: per-layer blocs; _praticabilite: per-layer walkability;
        # _praticabiliteEau: False until a water bloc is seen, then a
        # per-layer list; _blocsSupplementaires: layer -> list of extra blocs.
        self._bloc, self._praticabilite, self._praticabiliteEau, self._nombreCouches, self._blocsSupplementaires = list(), [False]*nombreCouches, False, nombreCouches, dict()
    def modifierPraticabilite(self, indice, nouvelleValeur, recalcul=True):
        """Set the walkability of the bloc at layer *indice*; optionally recompute."""
        self._bloc[indice].praticabilite = nouvelleValeur
        if recalcul is True:
            self.recalculerPraticabilites()
    def ajouterTileEtendu(self, couche, praticabilite, positionSource, nomTileset):
        """Attach an extra (extended) bloc to layer *couche* and recompute."""
        self._blocsSupplementaires.setdefault(couche, [])
        self._blocsSupplementaires[couche].append((praticabilite, positionSource, nomTileset))
        self.recalculerPraticabilites()
    def recalculerPraticabilites(self):
        """Recompute per-layer walkability from blocs and extra blocs.

        NOTE(review): toutFaux is never reset inside the loop, so once any
        layer is unwalkable every higher layer is also marked unwalkable
        (unless that layer's bloc is a bridge) — confirm this cascading is
        intentional.
        """
        i, toutFaux, tileEau = 0, False, False
        while i < len(self._bloc):
            praticabiliteActuelle, praticabiliteEauActuelle = self._bloc[i].praticabilite, self._bloc[i].eau
            if praticabiliteActuelle is False:
                toutFaux = True
            if praticabiliteEauActuelle is True and tileEau is False:
                tileEau = True
            if praticabiliteActuelle is True and i == 0 and self._bloc[i].vide is True: # empty bloc on layer 0 is void, hence unwalkable
                toutFaux = True
            if i in self._blocsSupplementaires.keys():
                # Any unwalkable extra bloc makes the layer unwalkable.
                for (praticabiliteBlocSupplementaire, positionSource, nomTileset) in self._blocsSupplementaires[i]:
                    if praticabiliteBlocSupplementaire is False:
                        toutFaux = True
            if toutFaux is True and self._bloc[i].pont is False:
                self._praticabilite[i] = False
            else:
                self._praticabilite[i] = True
            if tileEau is True:
                # Lazily promote _praticabiliteEau to a per-layer list.
                if self._praticabiliteEau is False:
                    self._praticabiliteEau = [False]*len(self._praticabilite)
                if praticabiliteEauActuelle is True or praticabiliteActuelle is True:
                    self._praticabiliteEau[i] = True
            i += 1
    def _getBloc(self):
        return self._bloc
    def _getPraticabilite(self):
        return self._praticabilite
    def _getPraticabiliteEau(self):
        return self._praticabiliteEau
    # Read-only views over the internal state.
    bloc = property(_getBloc)
    praticabilite = property(_getPraticabilite)
    praticabiliteEau = property(_getPraticabiliteEau)
| {
"content_hash": "fee88ec1b4e2f8fccb0a399c6d7954a6",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 174,
"avg_line_length": 45.5,
"alnum_prop": 0.6459968602825745,
"repo_name": "Rastagong/narro",
"id": "a0581dc45a7a75da3f51fed759efdb7063c0309a",
"size": "2570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "584066"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
# CUSTOM FILE SIZE VALIDATOR
def validate_image(fieldfile_obj):
    """Reject uploaded images larger than 0.5 MB (raises ValidationError)."""
    megabyte_limit = 0.5
    max_bytes = megabyte_limit*1024*1024
    if fieldfile_obj.file.size > max_bytes:
        raise ValidationError("Max file size is %sMB" % str(megabyte_limit))
class Profile(models.Model):
    """
    Per-user author profile, linked one-to-one to Django's User model.
    """
    # Owning user; deleting the user cascades to the profile.
    user = models.OneToOneField(
        User,
        on_delete=models.CASCADE
    )
    # Optional avatar; size capped by validate_image (0.5 MB).
    profile_picture = models.ImageField(
        upload_to='images/%Y/%m/%d',
        validators=[validate_image],
        blank=True,
        null=True
    )
    profile_name = models.CharField(
        verbose_name='Name',
        null=True,
        blank=True,
        max_length=50
    )
    profile_email = models.EmailField(
        verbose_name='Email Address',
        null=True,
        blank=True
    )
    profile_location = models.CharField(
        verbose_name='Origin/City',
        null=True,
        blank=True,
        max_length=50
    )
    profile_github = models.URLField(
        verbose_name='Github URL',
        null=True,
        blank=True
    )
    # URL slug derived from profile_name on first save (see save()).
    slug = models.SlugField()
    is_created = models.DateTimeField(
        null=True,
        blank=True
    )
    is_moderator = models.BooleanField(
        default=False,
    )
    def __str__(self):
        return str(self.user)
    def save(self, **kwargs):
        # Generate a unique slug from profile_name the first time only;
        # import is local to avoid a circular import at module load.
        if not self.slug:
            from djangoid.utils import get_unique_slug
            self.slug = get_unique_slug(instance=self, field='profile_name')
        super(Profile, self).save(**kwargs)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """
    Create a Profile automatically when a new User is created.
    """
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """
    Persist the related Profile whenever the User is saved.
    """
    instance.profile.save()
| {
"content_hash": "357c8ca86600cb6760586b8cc62196e2",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 76,
"avg_line_length": 22.397959183673468,
"alnum_prop": 0.6182232346241457,
"repo_name": "django-id/website",
"id": "82385952ca66d768cdbc258d526c5a44db449556",
"size": "2195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_author/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4048"
},
{
"name": "Dockerfile",
"bytes": "307"
},
{
"name": "HTML",
"bytes": "85194"
},
{
"name": "Python",
"bytes": "51563"
},
{
"name": "SCSS",
"bytes": "13326"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
from ciphertext import Ciphertext
import time  # NOTE(review): unused here — possibly left over from timing runs
# Round-trip smoke test: text_out() must return exactly what text_in() stored.
c = Ciphertext()
c.text_in('hello')
assert c.text_out() == 'hello'
# Second instance exercises input only; its output is never checked here.
d = Ciphertext()
d.text_in('No King of the Isles had ever needed a Hand')
# Build n-gram frequency tables from a natural-language sample file.
with open('sample.txt','r') as source:
    natural_text = source.read()
natural = Ciphertext()
natural.text_in(natural_text)
natural.pair_frequencies()
natural.triplet_frequencies()
natural.quadruplet_frequencies()
| {
"content_hash": "3ec45c0b0c7a681dc6a83fa5e6657a29",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 23.823529411764707,
"alnum_prop": 0.7333333333333333,
"repo_name": "paulsbrookes/subcipher",
"id": "9e9aaa0fbaff5dd353ea40ce5bf702c625d79509",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "index_based/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "270115"
},
{
"name": "Python",
"bytes": "59675"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema change: GlossaryTerm.term becomes unique
    # (and remains nullable).
    dependencies = [
        ('topics', '0047_auto_20170507_0913'),
    ]
    operations = [
        migrations.AlterField(
            model_name='glossaryterm',
            name='term',
            field=models.CharField(max_length=200, null=True, unique=True),
        ),
    ]
| {
"content_hash": "a0629960c859da33b1a1d03ade3b4c38",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 22.555555555555557,
"alnum_prop": 0.603448275862069,
"repo_name": "uccser/cs-unplugged",
"id": "4007aa59158d834d49e03b2d601441789f13a17c",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "csunplugged/topics/migrations/0048_auto_20170507_2226.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7927"
},
{
"name": "HTML",
"bytes": "432891"
},
{
"name": "JavaScript",
"bytes": "104806"
},
{
"name": "Python",
"bytes": "1257568"
},
{
"name": "SCSS",
"bytes": "67560"
},
{
"name": "Shell",
"bytes": "12461"
}
],
"symlink_target": ""
} |
import json
import yaml
import os
import sys
import re
from collections import OrderedDict
from ptemplate.template import Template
GIT_REPO = "https://github.com/jboss-openshift/application-templates.git"
REPO_NAME = "application-templates/"
TEMPLATE_DOCS = "docs/"
APPLICATION_DIRECTORIES = ("amq","eap","webserver","decisionserver","processserver","datagrid","datavirt","sso")
template_dirs = [ 'amq', 'eap', 'secrets', 'webserver', 'decisionserver', 'processserver', 'datagrid', 'datavirt', 'sso']
amq_ssl_desc = None
LINKS = {"jboss-eap64-openshift:1.8": "../../eap/eap-openshift{outfilesuffix}[`jboss-eap-6/eap64-openshift`]",
"jboss-eap70-openshift:1.7": "../../eap/eap-openshift{outfilesuffix}[`jboss-eap-7/eap70-openshift`]",
"jboss-eap71-openshift:1.2": "../../eap/eap-openshift{outfilesuffix}[`jboss-eap-7/eap71-openshift`]",
"jboss-webserver31-tomcat7-openshift:1.1": "../../webserver/tomcat7-openshift{outfilesuffix}[`jboss-webserver-3/webserver31-tomcat7-openshift`]",
"jboss-webserver31-tomcat8-openshift:1.1": "../../webserver/tomcat8-openshift{outfilesuffix}[`jboss-webserver-3/webserver31-tomcat8-openshift`]",
"jboss-decisionserver64-openshift:1.2": "../../decisionserver/decisionserver-openshift{outfilesuffix}[`jboss-decisionserver-6/decisionserver64-openshift`]",
"jboss-processserver64-openshift:1.2": "../../processserver/processserver-openshift{outfilesuffix}[`jboss-processserver-6/processserver64-openshift`]",
"jboss-datavirt63-openshift:1.4": "../../datavirt/datavirt-openshift{outfilesuffix}[`jboss-datavirt-6/datavirt63-openshift`]",
"redhat-sso71-openshift:1.3": "../../sso/sso-openshift{outfilesuffix}[`redhat-sso-7/sso71-openshift`]",
"redhat-sso72-openshift:1.0": "../../sso/sso-openshift{outfilesuffix}[`redhat-sso-7/sso72-openshift`]",
}
PARAMETER_VALUES = {"APPLICATION_DOMAIN": "secure-app.test.router.default.local", \
"SOURCE_REPOSITORY_URL": "https://github.com/jboss-openshift/openshift-examples.git", \
"SOURCE_REPOSITORY_REF": "master", \
"CONTEXT_DIR": "helloworld", \
"GITHUB_WEBHOOK_SECRET": "secret101", \
"GENERIC_WEBHOOK_SECRET": "secret101"}
autogen_warning="""////
AUTOGENERATED FILE - this file was generated via ./gen_template_docs.py.
Changes to .adoc or HTML files may be overwritten! Please change the
generator or the input template (./*.in)
////
"""
def generate_templates():
    """Generate docs for every .json/.yaml template in the known directories."""
    present = [d for d in template_dirs if os.path.isdir(d)]
    for directory in present:
        for filename in sorted(os.listdir(directory)):
            # Only template files are documented; other files are ignored.
            if filename.endswith(('.json', '.yaml')):
                generate_template(os.path.join(directory, filename))
def generate_template(path):
    """Generate the .adoc documentation file for a single template.

    Args:
        path (str): path to a .json or .yaml template; image-stream
            definitions are skipped.
    """
    if "image-stream" in path:
        return
    with open(path) as data_file:
        if path[-5:] == '.json':
            # OrderedDict keeps parameter/object ordering stable in the docs.
            data = json.load(data_file, object_pairs_hook=OrderedDict)
            outfile = TEMPLATE_DOCS + re.sub(r'\.json$', '', path) + '.adoc'
        else:
            # safe_load: templates are plain data; yaml.load without an
            # explicit Loader is deprecated and can construct arbitrary
            # Python objects from untrusted input.
            data = yaml.safe_load(data_file)
            outfile = TEMPLATE_DOCS + re.sub(r'\.yaml$', '', path) + '.adoc'
    if not 'labels' in data or not "template" in data["labels"]:
        sys.stderr.write("no template label for template %s, can't generate documentation\n"%path)
        return
    outdir = os.path.dirname(outfile)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    with open(outfile, "w") as text_file:
        print ("Generating %s..." % outfile)
        text_file.write(autogen_warning)
        text_file.write(createTemplate(data, path))
def createTemplate(data, path):
    """Render the AsciiDoc body for a parsed template via template.adoc.in.

    Args:
        data (dict): parsed template (JSON/YAML)
        path (str): template path, used to special-case product families

    Returns:
        str: rendered AsciiDoc text
    """
    templater = Template()
    templater.template = open('./template.adoc.in').read()
    tdata = { "template": data['labels']['template'], }
    # Fill in the template description, if supplied
    if 'annotations' in data['metadata'] and 'description' in data['metadata']['annotations']:
        tdata['description'] = data['metadata']['annotations']['description']
    # special case: AMQ SSL templates have additional description
    global amq_ssl_desc
    if re.match('amq', path) and re.match('.*ssl\.json$', path):
        if not amq_ssl_desc:
            # Cache the boilerplate; it is shared by all AMQ SSL templates.
            with open('amq-ssl.adoc.in','r') as tmp:
                amq_ssl_desc = tmp.read()
        tdata['description'] += "\n\n" + amq_ssl_desc
    # special case: JDG templates have additional description
    if re.match('datagrid', path):
        with open('datagrid.adoc.in','r') as tmp:
            datagrid_desc = tmp.read()
        tdata['description'] += "\n\n" + datagrid_desc
    # special case: SSO templates have additional description
    if re.match('sso', path):
        with open('sso.adoc.in','r') as tmp:
            sso_desc = tmp.read()
        tdata['description'] += "\n\n" + sso_desc
    # Fill in template parameters table, if there are any
    if ("parameters" in data and "objects" in data) and len(data["parameters"]) > 0:
        tdata['parameters'] = [{ 'parametertable': createParameterTable(data) }]
    if "objects" in data:
        tdata['objects'] = [{}]
        # Fill in sections if they are present in the JSON (createObjectTable version)
        for kind in ['Service', 'Route', 'BuildConfig', 'PersistentVolumeClaim']:
            if 0 >= len([ x for x in data["objects"] if kind == x["kind"] ]):
                continue
            tdata['objects'][0][kind] = [{ "table": createObjectTable(data, kind) }]
        # Fill in sections if they are present in the JSON (createContainerTable version)
        for kind in ['image', 'readinessProbe', 'ports', 'env']:
            if 0 >= len([obj for obj in data["objects"] if obj["kind"] == "DeploymentConfig"]):
                continue
            tdata['objects'][0][kind] = [{ "table": createContainerTable(data, kind) }]
        # Fill in sections if they are present in the JSON (createDeployConfigTable version)
        for kind in ['triggers', 'replicas', 'volumes', 'serviceAccountName']:
            if 0 >= len([obj for obj in data["objects"] if obj["kind"] == "DeploymentConfig"]):
                continue
            if kind in ['volumes', 'serviceAccountName']:
                specs = [d["spec"]["template"]["spec"] for d in data["objects"] if d["kind"] == "DeploymentConfig"]
                matches = [spec[kind] for spec in specs if spec.get(kind) is not None]
                if len(matches) <= 0:
                    continue
            tdata['objects'][0][kind] = [{ "table": createDeployConfigTable(data, kind) }]
        # the 'secrets' section is not relevant to the secrets templates
        if not re.match('^secrets', path):
            specs = [d["spec"]["template"]["spec"] for d in data["objects"] if d["kind"] == "DeploymentConfig"]
            serviceAccountName = [spec["serviceAccountName"] for spec in specs if spec.get("serviceAccountName") is not None]
            # our 'secrets' are always attached to a service account
            # only include the secrets section if we have defined serviceAccount(s)
            if len(serviceAccountName) > 0:
                if re.match('^datavirt', path):
                    tdata['objects'][0]['secrets'] = [{ "secretName": "datavirt-app-secret", "secretFile": "datavirt-app-secret.yaml" }]
                else:
                    secretName = [param["value"] for param in data["parameters"] if "value" in param and param["value"].endswith("-app-secret")]
                    tdata['objects'][0]['secrets'] = [{ "secretName": secretName[0], "secretFile": secretName[0] + ".json" }]
        # currently the clustering section applies only to EAP templates
        if re.match('^eap', path):
            tdata['objects'][0]['clustering'] = [{}]
    return templater.render(tdata)
def possibly_fix_width(text):
    """Heuristically wrap *text* in backticks when it looks like a path,
    variable reference, or ALL_CAPS environment-variable name."""
    if text == '' or text == '--':
        # Empty and placeholder cells stay as-is.
        return text
    # Stringify non-string arguments with repr-style formatting.
    if type(text) not in (type('string'), type(u'Unicode')):
        text = "%r" % text
    looks_fixed_width = (
        text[0] in "$/"
        or text[-1] == "}"
        or re.match(r'^[A-Z_\${}:-]+$', text) is not None
    )
    return '`%s`' % text if looks_fixed_width else text
def buildRow(columns):
    """Format *columns* as one AsciiDoc table row, monospacing likely literals."""
    cells = [possibly_fix_width(cell) for cell in columns]
    return "\n|" + " | ".join(cells)
def getVolumePurpose(name):
    """Guess a volume's purpose from the tokens of its hyphenated name."""
    parts = name.split("-")
    if any(token in parts for token in ("certificate", "keystore", "secret")):
        return "ssl certs"
    if "amq" in parts:
        return "kahadb"
    if "pvol" in parts:
        # e.g. "<app>-<purpose>-pvol": the second token names the purpose.
        return parts[1]
    return "--"
# Used for getting image environment variables into the parameters table and
# parameter descriptions into the image environment table.
def getVariableInfo(data, name, value):
    """Return field *value* of the entry in *data* whose name loosely
    matches *name*; fall back to PARAMETER_VALUES for well-known values,
    else "--"."""
    for entry in data:
        entry_name = entry["name"]
        # Loose match: exact, or either name (minus its first character,
        # e.g. a leading '$') contained in the other.
        if entry_name == name or name[1:] in entry_name or entry_name[1:] in name:
            return entry[value]
    if value == "value" and name in PARAMETER_VALUES.keys():
        return PARAMETER_VALUES[name]
    return "--"
def createParameterTable(data):
    """Build the parameters table: one row per template parameter, joined
    with the matching container env var, description, value and required flag."""
    text = ""
    for param in data["parameters"]:
        # Flatten the env lists of every DeploymentConfig's first container.
        deploy = [d["spec"]["template"]["spec"]["containers"][0]["env"] for d in data["objects"] if d["kind"] == "DeploymentConfig"]
        environment = [item for sublist in deploy for item in sublist]
        envVar = getVariableInfo(environment, param["name"], "name")
        value = param["value"] if param.get("value") else getVariableInfo(environment, param["name"], "value")
        req = param["required"] if "required" in param else "?"
        columns = [param["name"], envVar, param["description"], value, req]
        text += buildRow(columns)
    return text
def createObjectTable(data, tableKind):
    """Build the table for objects of kind *tableKind* ('Service', 'Route',
    'BuildConfig' or 'PersistentVolumeClaim')."""
    text = ""
    columns =[]
    for obj in data["objects"]:
        if obj["kind"] == 'Service' and tableKind == 'Service':
            # Services are rendered as a multi-row cell: one row per port,
            # plus the service description spanning the same rows.
            addDescription=True
            ports = obj["spec"]["ports"]
            text += "\n." + str(len(ports)) + "+| `" + obj["metadata"]["name"] + "`"
            for p in ports:
                columns = ["port", "name"]
                columns = [str(p[col]) if p.get(col) else "--" for col in columns]
                text += buildRow(columns)
            if addDescription:
                text += "\n." + str(len(ports)) + "+| " + obj["metadata"]["annotations"]["description"]
                addDescription=False
            continue
        elif obj["kind"] == 'Route' and tableKind == 'Route':
            if(obj["spec"].get("tls")):
                columns = [obj["id"], ("TLS "+ obj["spec"]["tls"]["termination"]), obj["spec"]["host"]]
            else:
                columns = [obj["id"], "none", obj["spec"]["host"]]
        elif obj["kind"] == 'BuildConfig' and tableKind == 'BuildConfig':
            # Source builds link back to the builder image's own doc page.
            if obj["spec"]["strategy"]["type"] == 'Source':
                s2i = obj["spec"]["strategy"]["sourceStrategy"]["from"]["name"]
                link = " link:" + LINKS[s2i]
            elif obj["spec"]["strategy"]["type"] == 'Docker':
                s2i = obj["spec"]["strategy"]["dockerStrategy"]["dockerfilePath"]
                link = ""
            columns = [s2i, link, obj["spec"]["output"]["to"]["name"], ", ".join([x["type"] for x in obj["spec"]["triggers"] ]) ]
        elif obj["kind"] == 'PersistentVolumeClaim' and tableKind == 'PersistentVolumeClaim':
            columns = [obj["metadata"]["name"], obj["spec"]["accessModes"][0]]
        if(obj["kind"] == tableKind):
            text += buildRow(columns)
    return text
def createDeployConfigTable(data, table):
    """Build the per-DeploymentConfig table named by *table*
    ('triggers', 'replicas', 'serviceAccountName' or 'volumes')."""
    text = ""
    deploymentConfig = (obj for obj in data["objects"] if obj["kind"] == "DeploymentConfig")
    for obj in deploymentConfig:
        columns = []
        deployment = obj["metadata"]["name"]
        spec = obj["spec"]
        template = spec["template"]["spec"]
        if(template.get(table) or spec.get(table)):
            if table == "triggers":
                columns = [deployment, spec["triggers"][0]["type"] ]
            elif table == "replicas":
                columns = [deployment, str(spec["replicas"]) ]
            elif table == "serviceAccountName":
                columns = [deployment, template["serviceAccountName"]]
            elif table == "volumes":
                # Only the first volume/volumeMount of each deployment is shown.
                volumeMount = obj["spec"]["template"]["spec"]["containers"][0]["volumeMounts"][0]
                name = template["volumes"][0]["name"]
                readOnly = str(volumeMount["readOnly"]) if "readOnly" in volumeMount else "false"
                columns = [deployment, name, volumeMount["mountPath"], getVolumePurpose(name), readOnly]
            text += buildRow(columns)
    return text
def createContainerTable(data, table):
    """Build the per-DeploymentConfig container table named by *table*
    ('image', 'readinessProbe', 'ports' or 'env'); only the first container
    of each deployment is inspected."""
    text = ""
    deploymentConfig = (obj for obj in data["objects"] if obj["kind"] == "DeploymentConfig")
    for obj in deploymentConfig:
        columns = []
        deployment = obj["metadata"]["name"]
        container = obj["spec"]["template"]["spec"]["containers"][0]
        if table == "image":
            columns = [deployment, container["image"]]
            text += buildRow(columns)
        elif table == "readinessProbe": #abstract out
            # Rendered as a literal block rather than a table row.
            if container.get("readinessProbe"):
                text += ("\n." + deployment + "\n----\n" \
                         + " ".join(container["readinessProbe"]["exec"]["command"]) \
                         + "\n----\n")
        elif table == "ports":
            text += "\n." + str(len(container["ports"])) + "+| `" + deployment + "`"
            ports = container["ports"]
            for p in ports:
                columns = ["name", "containerPort", "protocol"]
                columns = [str(p[col]) if p.get(col) else "--" for col in columns]
                text += buildRow(columns)
        elif table == "env":
            environment = container["env"]
            text += "\n." + str(len(environment)) + "+| `" + deployment + "`"
            for env in environment:
                columns = [env["name"], getVariableInfo(data["parameters"], env["name"], "description")]
                # TODO: handle valueFrom instead of value
                if "value" in env:
                    columns.append(env["value"])
                else:
                    columns.append("--")
                text += buildRow(columns)
    return text
# Human-readable product names per template directory (README section headings).
fullname = {
  "amq": "JBoss A-MQ",
  "eap": "JBoss EAP",
  "webserver": "JBoss Web Server",
  "decisionserver": "Red Hat JBoss BRMS decision server",
  "processserver": "Red Hat JBoss BPM Suite intelligent process server",
  "datagrid": "JBoss Data Grid",
  "datavirt": "Red Hat JBoss Data Virtualization",
  "sso": "Red Hat SSO",
}
def generate_readme():
    """Generates a README page for the template documentation."""
    with open('docs/README.adoc','w') as fh:
        fh.write(autogen_warning)
        # page header
        fh.write(open('./README.adoc.in').read())
        for directory in sorted(template_dirs):
            if not os.path.isdir(directory):
                continue
            # section header
            fh.write('\n== %s\n\n' % fullname.get(directory, directory))
            # links
            for template in [ os.path.splitext(x)[0] for x in sorted(os.listdir(directory)) ]:
                # XXX: Hack for 1.3 release, which excludes processserver
                # NOTE(review): this exclusion may be stale — confirm it is
                # still needed for current releases.
                if template != "processserver-app-secret" and "image-stream" not in template:
                    fh.write("* link:./%s/%s.adoc[%s]\n" % (directory, template, template))
        # release notes
        fh.write(open('./release-notes.adoc.in').read())
# expects to be run from the root of the repository
if __name__ == "__main__":
    # the user may specify a particular template to parse,
    if 1 < len(sys.argv):
        sys.argv.pop(0)  # drop the script name; remaining args are paths
        for t in sys.argv:
            generate_template(t)
    # otherwise we'll look for them all (and do an index)
    else:
        generate_templates()
        generate_readme()
| {
"content_hash": "45a5c7b851449eaa8c9bbfe8e40f93e5",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 166,
"avg_line_length": 45.69653179190752,
"alnum_prop": 0.5912971981531845,
"repo_name": "rcernich/application-templates",
"id": "a9a7d5a8ac0391d8d7eb4afa65319ad1f2de42b2",
"size": "16268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gen_template_docs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16268"
}
],
"symlink_target": ""
} |
from mailsync import defaults
class Settings(object):
def __init__(self):
# update this dict from the defaults dictionary (but only for ALL_CAPS settings)
for setting in dir(defaults):
if setting == setting.upper():
setattr(self, setting, getattr(defaults, setting)) | {
"content_hash": "ca885d92c24ac59dead54c83e670db69",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 88,
"avg_line_length": 35.111111111111114,
"alnum_prop": 0.6455696202531646,
"repo_name": "rebelact/mailsync-app",
"id": "5843a9b11a61f33c06f545ead0aa4fa614bfa401",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailsync/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "35239"
},
{
"name": "JavaScript",
"bytes": "12940"
},
{
"name": "Python",
"bytes": "92938"
},
{
"name": "Ruby",
"bytes": "833"
},
{
"name": "Shell",
"bytes": "4580"
}
],
"symlink_target": ""
} |
"""
Scheduler base class that all Schedulers should inherit from
"""
from oslo.config import cfg
from cinder import db
from cinder.openstack.common import importutils
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume import rpcapi as volume_rpcapi
scheduler_driver_opts = [
cfg.StrOpt('scheduler_host_manager',
default='cinder.scheduler.host_manager.HostManager',
help='The scheduler host manager class to use'),
cfg.IntOpt('scheduler_max_attempts',
default=3,
help='Maximum number of attempts to schedule an volume'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
def volume_update_db(context, volume_id, host):
    '''Set the host and set the scheduled_at field of a volume.

    :returns: A Volume with the updated fields set properly.
    '''
    updates = {
        'host': host,
        'scheduled_at': timeutils.utcnow(),
    }
    return db.volume_update(context, volume_id, updates)
class Scheduler(object):
    """The base class that all Scheduler classes should inherit from."""
    def __init__(self):
        # Host manager class is configurable via scheduler_host_manager.
        self.host_manager = importutils.import_object(
            CONF.scheduler_host_manager)
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
    def get_host_list(self):
        """Get a list of hosts from the HostManager."""
        return self.host_manager.get_host_list()
    def get_service_capabilities(self):
        """Get the normalized set of capabilities for the services.
        """
        return self.host_manager.get_service_capabilities()
    def update_service_capabilities(self, service_name, host, capabilities):
        """Process a capability update from a service node."""
        self.host_manager.update_service_capabilities(service_name,
                                                      host,
                                                      capabilities)
    def hosts_up(self, context, topic):
        """Return the list of hosts that have a running service for topic."""
        services = db.service_get_all_by_topic(context, topic)
        return [service['host']
                for service in services
                if utils.service_is_up(service)]
    # NOTE(review): _() below is the gettext translation function, presumably
    # installed into builtins by OpenStack's i18n setup — confirm.
    def host_passes_filters(self, context, volume_id, host, filter_properties):
        """Check if the specified host passes the filters."""
        raise NotImplementedError(_("Must implement host_passes_filters"))
    def schedule(self, context, topic, method, *_args, **_kwargs):
        """Must override schedule method for scheduler to work."""
        raise NotImplementedError(_("Must implement a fallback schedule"))
    def schedule_create_volume(self, context, request_spec, filter_properties):
        """Must override schedule method for scheduler to work."""
        raise NotImplementedError(_("Must implement schedule_create_volume"))
| {
"content_hash": "91e4f555f6e5da80fad485026e7ad996",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 37.14102564102564,
"alnum_prop": 0.6534345875043148,
"repo_name": "inkerra/cinder",
"id": "13114c6aabecacc0d680eafdaa4dff9eb67114cf",
"size": "3711",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/scheduler/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3994935"
},
{
"name": "Shell",
"bytes": "8844"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated validator for `volume.slices.x.fill` (number in [0, 1])."""
    def __init__(self, plotly_name="fill", parent_name="volume.slices.x", **kwargs):
        super(FillValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults below may be overridden by the caller via kwargs.
            edit_type=kwargs.pop("edit_type", "calc"),
            max=kwargs.pop("max", 1),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
| {
"content_hash": "372bd8ea05cfc776ecb0170ccc8fb31f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 36.30769230769231,
"alnum_prop": 0.5847457627118644,
"repo_name": "plotly/plotly.py",
"id": "6bd4ea8cdb65ea911f710780fd17d011a66ee62a",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/volume/slices/x/_fill.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""
Notes regarding the implementation of smallest and
largest:
Both functions must take two keyword arguments:
max_factor -- int
min_factor -- int, default 0
Their return value must be a tuple (value, factors) where value is the
palindrome itself, and factors is an iterable containing both factors of the
palindrome in arbitrary order.
"""
import unittest
from palindrome_products import smallest, largest
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.2.0
class PalindromeProductsTest(unittest.TestCase):
    """Exercises smallest()/largest(); both return (value, factors) where
    factors is an iterable of two-element factor pairs in arbitrary order."""
    def test_smallest_palindrome_from_single_digit_factors(self):
        value, factors = smallest(min_factor=1, max_factor=9)
        self.assertEqual(value, 1)
        self.assertFactorsEqual(factors, [[1, 1]])
    def test_largest_palindrome_from_single_digit_factors(self):
        value, factors = largest(min_factor=1, max_factor=9)
        self.assertEqual(value, 9)
        self.assertFactorsEqual(factors, [[1, 9], [3, 3]])
    def test_smallest_palindrome_from_double_digit_factors(self):
        value, factors = smallest(min_factor=10, max_factor=99)
        self.assertEqual(value, 121)
        self.assertFactorsEqual(factors, [[11, 11]])
    def test_largest_palindrome_from_double_digit_factors(self):
        value, factors = largest(min_factor=10, max_factor=99)
        self.assertEqual(value, 9009)
        self.assertFactorsEqual(factors, [[91, 99]])
    def test_smallest_palindrome_from_triple_digit_factors(self):
        value, factors = smallest(min_factor=100, max_factor=999)
        self.assertEqual(value, 10201)
        self.assertFactorsEqual(factors, [[101, 101]])
    def test_largest_palindrome_from_triple_digit_factors(self):
        value, factors = largest(min_factor=100, max_factor=999)
        self.assertEqual(value, 906609)
        self.assertFactorsEqual(factors, [[913, 993]])
    def test_smallest_palindrome_from_four_digit_factors(self):
        value, factors = smallest(min_factor=1000, max_factor=9999)
        self.assertEqual(value, 1002001)
        self.assertFactorsEqual(factors, [[1001, 1001]])
    def test_largest_palindrome_from_four_digit_factors(self):
        value, factors = largest(min_factor=1000, max_factor=9999)
        self.assertEqual(value, 99000099)
        self.assertFactorsEqual(factors, [[9901, 9999]])
    def test_empty_for_smallest_palindrome_if_none_in_range(self):
        value, factors = smallest(min_factor=1002, max_factor=1003)
        self.assertIsNone(value)
        self.assertFactorsEqual(factors, [])
    def test_empty_for_largest_palindrome_if_none_in_range(self):
        value, factors = largest(min_factor=15, max_factor=15)
        self.assertIsNone(value)
        self.assertFactorsEqual(factors, [])
    def test_error_for_smallest_palindrome_if_min_is_more_than_max(self):
        with self.assertRaisesWithMessage(ValueError):
            value, factors = smallest(min_factor=10000, max_factor=1)
    def test_error_for_largest_palindrome_if_min_is_more_than_max(self):
        with self.assertRaisesWithMessage(ValueError):
            value, factors = largest(min_factor=2, max_factor=1)
    # Utility functions
    def setUp(self):
        # assertRaisesRegex was named assertRaisesRegexp before Python 3.2;
        # alias it so assertRaisesWithMessage works on older interpreters.
        try:
            self.assertRaisesRegex
        except AttributeError:
            self.assertRaisesRegex = self.assertRaisesRegexp
    def assertRaisesWithMessage(self, exception):
        # Requires the raised exception to carry a non-empty message.
        return self.assertRaisesRegex(exception, r".+")
    def assertFactorsEqual(self, actual, expected):
        # Order-insensitive comparison of factor pairs.
        self.assertEqual(set(map(frozenset, actual)),
                         set(map(frozenset, expected)))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "86cbd189f0caaa1ce6685023397df575",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 76,
"avg_line_length": 38.114583333333336,
"alnum_prop": 0.6848865810330691,
"repo_name": "smalley/python",
"id": "3735efb21c7ac2121e25805bf4e2d972e67b9fa4",
"size": "3659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/palindrome-products/palindrome_products_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "583569"
},
{
"name": "Shell",
"bytes": "1255"
}
],
"symlink_target": ""
} |
"""
Move packages between labels.
"""
# Standard library imports
from __future__ import unicode_literals, print_function
import logging
# Local imports
from binstar_client import errors
from binstar_client.utils import get_server_api, parse_specs
logger = logging.getLogger('binstar.move')
def main(args):
    """Move a package's files from one label to another.

    The files are first attached to the destination label, then detached
    from the source label; each step is best-effort.
    """
    aserver_api = get_server_api(args.token, args.site)
    spec = args.spec

    channels = aserver_api.list_channels(spec.user)
    label_text = 'label' if (args.from_label and args.to_label) else 'channel'
    source = args.from_label.lower()
    destination = args.to_label.lower()

    # The source label must already exist for this user.
    if source not in channels:
        raise errors.UserError(
            "{} {} does not exist\n\tplease choose from: {}".format(
                label_text.title(),
                source,
                ', '.join(channels)
            ))

    if source == destination:
        raise errors.UserError('--from-label and --to-label must be different')

    # Best effort: attach the files to the destination label first...
    try:
        aserver_api.add_channel(
            destination,
            spec.user,
            package=spec.package,
            version=spec._version,
            filename=spec._basename,
        )
    except Exception:
        pass

    # ...then detach them from the source label.
    try:
        aserver_api.remove_channel(
            source,
            spec.user,
            package=spec.package,
            version=spec._version,
            filename=spec._basename,
        )
    except Exception:
        pass
def add_parser(subparsers):
    """Register the ``move`` sub-command and its options on *subparsers*."""
    parser = subparsers.add_parser(
        'move',
        help='Move packages between labels',
        description=__doc__,
    )

    parser.add_argument(
        'spec',
        help='Package - written as user/package/version[/filename] '
             'If filename is not given, move all files in the version',
        type=parse_specs,
    )

    # TODO: To be implemented later on
    # parser.add_argument(
    #     '--to-owner',
    #     help='User account to move package to (default: your account)',
    # )

    # Each label option lives in its own (currently single-member)
    # mutually exclusive group, mirroring the original layout.
    from_group = parser.add_mutually_exclusive_group()
    to_group = parser.add_mutually_exclusive_group()
    from_group.add_argument(
        '--from-label',
        help='Label to move packages from',
        default='main',
    )
    to_group.add_argument(
        '--to-label',
        help='Label to move packages to',
        default='main',
    )

    parser.set_defaults(main=main)
| {
"content_hash": "9fa6f76841a2f026e9303ec46858cd03",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 26.432692307692307,
"alnum_prop": 0.579119679883594,
"repo_name": "Anaconda-Platform/anaconda-client",
"id": "89fe4280046570f1a6ed5872aa874dc121f0bee7",
"size": "2773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "binstar_client/commands/move.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "101"
},
{
"name": "Jupyter Notebook",
"bytes": "2976"
},
{
"name": "Python",
"bytes": "318160"
},
{
"name": "Ruby",
"bytes": "8"
},
{
"name": "Shell",
"bytes": "10280"
}
],
"symlink_target": ""
} |
import stripe
import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('simple_stripe')
# Fail fast at import time when either Stripe key is missing from the
# Django settings (absent attribute or explicitly set to None).
for _required_key in ('STRIPE_PUBLIC_KEY', 'STRIPE_SECRET_KEY'):
    if getattr(settings, _required_key, None) is None:
        raise ImproperlyConfigured(
            '{0} not configured'.format(_required_key)
        )
stripe.api_key = settings.STRIPE_SECRET_KEY | {
"content_hash": "e23bf473677bd745204dac525d9b08c8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 55,
"avg_line_length": 29.473684210526315,
"alnum_prop": 0.7321428571428571,
"repo_name": "Khabi/django-stripejs",
"id": "027115d005d26fa2a9f2f3507d18d9e9d6afc67b",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stripejs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11450"
}
],
"symlink_target": ""
} |
from pydbg import *
from pydbg.defines import *
import logging
import struct
import utils
import sys
#this code left intentionally undocumented
dbg = pydbg()
found_imvu = False
print "\n\nIMVU HOOK by Exploit\n\n\n\n"
pattern = raw_input("\n[?] What string to search for? > \n")
logme = raw_input("\n[?] Would you like to log my output to a text file? ( y/n )")
#readinput()
print '\n[!] Searching for pattern:%s'%(pattern)
def ssl_sniff( dbg, args ):
buffer = ""
offset = 0
while 1:
byte = dbg.read_process_memory( args[1] + offset, 1 )
if byte != "\x00":
buffer += byte
offset += 1
continue
else:
break
if pattern in buffer:
logging.basicConfig(filename='hook.log',level=logging.DEBUG)
logging.debug("Pre-Encrypted: %s" % buffer)
print "Pre-Encrypted: %s" % buffer
return DBG_CONTINUE
# Scan running processes for the IMVU client and hook PR_Write (the
# NSPR write primitive) so plaintext is observed before TLS encryption.
for (pid, name) in dbg.enumerate_processes():
    if name.lower() == "imvuclient.exe":
        found_imvu = True
        hooks = utils.hook_container()
        dbg.attach(pid)
        print "[!] Attaching to IMVU with PID: %d..." % pid
        # Resolve PR_Write inside nspr4.dll within the debuggee.
        hook_address = dbg.func_resolve_debuggee("nspr4.dll","PR_Write")
        if hook_address:
            # 2 -- presumably the number of stack arguments to capture
            # for the callback; confirm against the pydbg hook API.
            hooks.add( dbg, hook_address, 2, ssl_sniff, None)
            print "[*] nspr4.PR_Write hooked at: 0x%08x" % hook_address
            break
        else:
            print "[!] Error: Couldn't resolve hook address."
            sys.exit(-1)
if found_imvu:
    print "[*] Hook set, continuing process."
    dbg.run()
else:
    print "[!] Error: Couldn't find the process. Please fire up IMVU first."
    sys.exit(-1)
| {
"content_hash": "7e3e2ef373ac62d7b59966516adceef9",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 83,
"avg_line_length": 27.285714285714285,
"alnum_prop": 0.576497963932519,
"repo_name": "0xicl33n/pyimvudbg",
"id": "91d3b31a013bb222423d12391585d80c3acfd63e",
"size": "1719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hookOld.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "25058"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cobbler_system
version_added: '2.7'
short_description: Manage system objects in Cobbler
description:
- Add, modify or remove systems in Cobbler
options:
host:
description:
- The name or IP address of the Cobbler system.
default: 127.0.0.1
port:
description:
- Port number to be used for REST connection.
- The default value depends on parameter C(use_ssl).
username:
description:
- The username to log in to Cobbler.
default: cobbler
password:
description:
- The password to log in to Cobbler.
required: yes
use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only set to C(no) when used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
name:
description:
- The system name to manage.
properties:
description:
- A dictionary with system properties.
interfaces:
description:
- A list of dictionaries containing interface options.
sync:
description:
- Sync on changes.
- Concurrently syncing Cobbler is bound to fail.
type: bool
default: no
state:
description:
- Whether the system should be present, absent or a query is made.
choices: [ absent, present, query ]
default: present
author:
- Dag Wieers (@dagwieers)
notes:
- Concurrently syncing Cobbler is bound to fail with weird errors.
- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
'''
EXAMPLES = r'''
- name: Ensure the system exists in Cobbler
cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
name: myhost
properties:
profile: CentOS6-x86_64
name_servers: [ 2.3.4.5, 3.4.5.6 ]
name_servers_search: foo.com, bar.com
interfaces:
eth0:
macaddress: 00:01:02:03:04:05
ipaddress: 1.2.3.4
delegate_to: localhost
- name: Enable network boot in Cobbler
cobbler_system:
host: bdsol-aci-cobbler-01
username: cobbler
password: ins3965!
name: bdsol-aci51-apic1.cisco.com
properties:
netboot_enabled: yes
state: present
delegate_to: localhost
- name: Query all systems in Cobbler
cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
register: cobbler_systems
delegate_to: localhost
- name: Query a specific system in Cobbler
cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
name: '{{ inventory_hostname }}'
register: cobbler_properties
delegate_to: localhost
- name: Ensure the system does not exist in Cobbler
cobbler_system:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
name: myhost
delegate_to: localhost
'''
RETURN = r'''
systems:
description: List of systems
returned: C(state=query) and C(name) is not provided
type: list
system:
description: (Resulting) information about the system we are working with
returned: when C(name) is provided
type: dict
'''
import copy
import datetime
import ssl
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.module_utils._text import to_text
# Maps the interface property names accepted in this module's
# `interfaces` option (keys) to the field names Cobbler's
# modify_interface XML-RPC call expects (values).
IFPROPS_MAPPING = dict(
    bondingopts='bonding_opts',
    bridgeopts='bridge_opts',
    connected_mode='connected_mode',
    cnames='cnames',
    dhcptag='dhcp_tag',
    dnsname='dns_name',
    ifgateway='if_gateway',
    interfacetype='interface_type',
    interfacemaster='interface_master',
    ipaddress='ip_address',
    ipv6address='ipv6_address',
    ipv6defaultgateway='ipv6_default_gateway',
    ipv6mtu='ipv6_mtu',
    ipv6prefix='ipv6_prefix',
    ipv6secondaries='ipv6_secondariesu',  # NOTE(review): trailing 'u' looks like a typo; verify against Cobbler's interface field names
    ipv6staticroutes='ipv6_static_routes',
    macaddress='mac_address',
    management='management',
    mtu='mtu',
    netmask='netmask',
    static='static',
    staticroutes='static_routes',
    virtbridge='virt_bridge',
)
def getsystem(conn, name, token):
    """Look up a single Cobbler system by name.

    Returns the first matching system record as a dict, or an empty dict
    when *name* is falsy or nothing matches.
    """
    if not name:
        return dict()
    matches = conn.find_system(dict(name=name), token)
    return matches[0] if matches else dict()
def main():
    """Module entry point: create, update, remove or query Cobbler systems.

    Talks to Cobbler over its XML-RPC API. All write operations respect
    Ansible check mode; the result reports ``changed`` plus the
    (resulting) system record.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            port=dict(type='int'),
            username=dict(type='str', default='cobbler'),
            password=dict(type='str', no_log=True),
            use_ssl=dict(type='bool', default=True),
            validate_certs=dict(type='bool', default=True),
            name=dict(type='str'),
            interfaces=dict(type='dict'),
            properties=dict(type='dict'),
            sync=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        ),
        supports_check_mode=True,
    )

    username = module.params['username']
    password = module.params['password']
    port = module.params['port']
    use_ssl = module.params['use_ssl']
    validate_certs = module.params['validate_certs']
    name = module.params['name']
    state = module.params['state']

    module.params['proto'] = 'https' if use_ssl else 'http'
    # Default port depends on the scheme.
    if not port:
        module.params['port'] = '443' if use_ssl else '80'

    result = dict(
        changed=False,
    )

    start = datetime.datetime.utcnow()

    ssl_context = None
    if not validate_certs:
        try:  # Python 2.7.9 / 3.4 and newer
            # BUGFIX: the helper is ssl._create_unverified_context();
            # ssl.create_unverified_context() does not exist, so the old
            # code always raised AttributeError and certificate checking
            # was never actually disabled.
            ssl_context = ssl._create_unverified_context()
        except AttributeError:
            # Very old Python without the helper: globally disable
            # verification for urllib-based HTTPS clients instead.
            ssl._create_default_https_context = ssl._create_unverified_https_context

    url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)

    if ssl_context:
        conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
    else:
        conn = xmlrpc_client.Server(url)

    try:
        token = conn.login(username, password)
    except xmlrpc_client.Fault as e:
        module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
    except Exception as e:
        module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))

    # Snapshot of the system before any modification (also used for diff).
    system = getsystem(conn, name, token)

    if state == 'query':
        if name:
            result['system'] = system
        else:
            # Return a list of dictionaries describing every system.
            result['systems'] = conn.get_systems()

    elif state == 'present':
        if system:
            # Update existing entry.
            system_id = conn.get_system_handle(name, token)

            # ROBUSTNESS: properties may be omitted (None) on updates.
            for key, value in iteritems(module.params['properties'] or {}):
                if key not in system:
                    module.warn("Property '{0}' is not a valid system property.".format(key))
                    # ROBUSTNESS: skip unknown keys instead of raising
                    # KeyError on the comparison below.
                    continue
                if system[key] != value:
                    try:
                        conn.modify_system(system_id, key, value, token)
                        result['changed'] = True
                    except Exception as e:
                        module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
        else:
            # Create a new entry.
            system_id = conn.new_system(token)
            conn.modify_system(system_id, 'name', name, token)
            result['changed'] = True

            if module.params['properties']:
                for key, value in iteritems(module.params['properties']):
                    try:
                        conn.modify_system(system_id, key, value, token)
                    except Exception as e:
                        module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))

        # Collect interface properties into the flat "<prop>-<device>"
        # form that Cobbler's modify_interface expects.
        interface_properties = dict()
        if module.params['interfaces']:
            for device, values in iteritems(module.params['interfaces']):
                for key, value in iteritems(values):
                    if key == 'name':
                        continue
                    if key not in IFPROPS_MAPPING:
                        module.warn("Property '{0}' is not a valid system property.".format(key))
                        # ROBUSTNESS: an unmapped key cannot be compared
                        # or forwarded to Cobbler; skip it.
                        continue
                    # ROBUSTNESS: .get() so a brand-new device or field on
                    # an existing system counts as a change instead of
                    # raising KeyError.
                    if not system or system['interfaces'].get(device, {}).get(IFPROPS_MAPPING[key]) != value:
                        result['changed'] = True
                        interface_properties['{0}-{1}'.format(key, device)] = value

            if result['changed'] is True:
                conn.modify_system(system_id, "modify_interface", interface_properties, token)

        # Only save when the entry was changed.
        if not module.check_mode and result['changed']:
            conn.save_system(system_id, token)

    elif state == 'absent':
        if system:
            if not module.check_mode:
                conn.remove_system(name, token)
            result['changed'] = True

    # Concurrent syncs are known to fail, so sync only on demand and
    # only after an actual change.
    if not module.check_mode and module.params['sync'] and result['changed']:
        try:
            conn.sync(token)
        except Exception as e:
            module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))

    if state in ('absent', 'present'):
        result['system'] = getsystem(conn, name, token)

        if module._diff:
            result['diff'] = dict(before=system, after=result['system'])

    elapsed = datetime.datetime.utcnow() - start
    module.exit_json(elapsed=elapsed.seconds, **result)


if __name__ == '__main__':
    main()
| {
"content_hash": "564aedb334711450f67e81d2a7780999",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 143,
"avg_line_length": 32.1280487804878,
"alnum_prop": 0.6187132283165686,
"repo_name": "SergeyCherepanov/ansible",
"id": "a7768973febe4398ae7f75022ddbcc2534058c5e",
"size": "10738",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/remote_management/cobbler/cobbler_system.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
"""
Trove Management Command line tool
"""
import json
import os
import sys
from troveclient.compat import common
# If ../trove/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'troveclient.compat',
'__init__.py')):
sys.path.insert(0, possible_topdir)
oparser = None
def _pretty_print(info):
print(json.dumps(info, sort_keys=True, indent=4))
class HostCommands(common.AuthedCommandsBase):
    """Commands to list info on hosts."""
    # Option names exposed for this command; presumably turned into CLI
    # flags/attributes by AuthedCommandsBase -- verify there.
    params = [
        'name',
    ]
    def update_all(self):
        """Update all instances on a host."""
        self._require('name')  # abort unless --name was supplied
        self.dbaas.hosts.update_all(self.name)
    def get(self):
        """List details for the specified host."""
        self._require('name')
        self._pretty_print(self.dbaas.hosts.get, self.name)
    def list(self):
        """List all compute hosts."""
        self._pretty_list(self.dbaas.hosts.index)
class QuotaCommands(common.AuthedCommandsBase):
    """List and update quota limits for a tenant."""

    # Option names exposed for this command (CLI flags/attributes are
    # provided by AuthedCommandsBase).
    params = ['id', 'instances', 'volumes', 'backups']

    def list(self):
        """List all quotas for a tenant."""
        self._require('id')
        self._pretty_print(self.dbaas.quota.show, self.id)

    def update(self):
        """Update quota limits for a tenant."""
        self._require('id')
        # Every supplied limit except the tenant id itself.
        new_limits = {param: getattr(self, param)
                      for param in self.params if param != 'id'}
        self._pretty_print(self.dbaas.quota.update, self.id, new_limits)
class RootCommands(common.AuthedCommandsBase):
    """List details about the root info for an instance."""
    # Option names exposed for this command (--id).
    params = [
        'id',
    ]
    def history(self):
        """List root history for the instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.root_enabled_history, self.id)
class AccountCommands(common.AuthedCommandsBase):
    """Commands to list account info."""
    # Option names exposed for this command (--id).
    params = [
        'id',
    ]
    def list(self):
        """List all accounts with non-deleted instances."""
        self._pretty_print(self.dbaas.accounts.index)
    def get(self):
        """List details for the account provided."""
        self._require('id')
        self._pretty_print(self.dbaas.accounts.show, self.id)
class InstanceCommands(common.AuthedCommandsBase):
    """List details about an instance."""

    # Option names exposed for this command (CLI flags/attributes are
    # provided by AuthedCommandsBase).
    params = ['deleted', 'id', 'limit', 'marker', 'host']

    def get(self):
        """List details for the instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.show, self.id)

    def list(self):
        """List all instances for account."""
        # --deleted arrives as a string; map 'true'/'false' to a bool and
        # leave anything else (including unset) as None, i.e. no filter.
        deleted = None
        if self.deleted is not None:
            flag = self.deleted.lower()
            if flag == 'true':
                deleted = True
            elif flag == 'false':
                deleted = False
        self._pretty_paged(self.dbaas.management.index, deleted=deleted)

    def hwinfo(self):
        """Show hardware information details about an instance."""
        self._require('id')
        self._pretty_print(self.dbaas.hwinfo.get, self.id)

    def diagnostic(self):
        """List diagnostic details about an instance."""
        self._require('id')
        self._pretty_print(self.dbaas.diagnostics.get, self.id)

    def stop(self):
        """Stop MySQL on the given instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.stop, self.id)

    def reboot(self):
        """Reboot the instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.reboot, self.id)

    def migrate(self):
        """Migrate the instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.migrate, self.id, self.host)

    def reset_task_status(self):
        """Set the instance's task status to NONE."""
        self._require('id')
        self._pretty_print(self.dbaas.management.reset_task_status, self.id)
class StorageCommands(common.AuthedCommandsBase):
    """Commands to list devices info."""
    # No extra CLI options for this command.
    params = []
    def list(self):
        """List details for the storage device."""
        self._pretty_list(self.dbaas.storage.index)
class FlavorsCommands(common.AuthedCommandsBase):
    """Commands for managing Flavors."""

    # Option names exposed for this command (CLI flags/attributes are
    # provided by AuthedCommandsBase).
    params = ['name', 'ram', 'disk', 'vcpus', 'flavor_id',
              'ephemeral', 'swap', 'rxtx_factor', 'service_type']

    def create(self):
        """Create a new flavor."""
        # Only these fields are mandatory; the rest may stay unset.
        self._require('name', 'ram', 'disk', 'vcpus',
                      'flavor_id', 'service_type')
        self._pretty_print(self.dbaas.mgmt_flavor.create, self.name,
                           self.ram, self.disk, self.vcpus, self.flavor_id,
                           self.ephemeral, self.swap, self.rxtx_factor,
                           self.service_type)
def config_options(oparser):
    """Register the options shared by every mgmt command on *oparser*."""
    # NOTE: the backslash continuation keeps the source indentation
    # embedded inside the help string.
    oparser.add_option("-u", "--url", default="http://localhost:5000/v1.1",
                       help="Auth API endpoint URL with port and version. \
                            Default: http://localhost:5000/v1.1")
# Dispatch table: first CLI positional argument -> handler class.
# main() uses it both to contribute options to the parser and to
# instantiate the selected command.
COMMANDS = {
    'account': AccountCommands,
    'host': HostCommands,
    'instance': InstanceCommands,
    'root': RootCommands,
    'storage': StorageCommands,
    'quota': QuotaCommands,
    'flavor': FlavorsCommands,
}
def main():
    """Parse CLI arguments and dispatch to the chosen command/action."""
    # Build the option parser; every command class contributes options.
    oparser = common.CliOptions.create_optparser(True)
    for command_cls in COMMANDS.values():
        command_cls._prepare_parser(oparser)
    (options, args) = oparser.parse_args()

    if not args:
        # NOTE(review): this appears to rely on print_commands() exiting
        # the process; otherwise the pop() below raises IndexError.
        common.print_commands(COMMANDS)

    # First positional argument selects the command.
    cmd = args.pop(0)
    if cmd in COMMANDS:
        handler_cls = COMMANDS.get(cmd)
        command_object = None
        try:
            command_object = handler_cls(oparser)
        except Exception as ex:
            if options.debug:
                raise
            print(ex)

        # Supported actions for the selected command.
        actions = common.methods_of(command_object)
        if len(args) < 1:
            # NOTE(review): same assumption -- print_actions() is
            # expected to exit before the pop() below runs.
            common.print_actions(cmd, actions)

        # Second positional argument selects the action.
        action = args.pop(0)
        if action in actions:
            try:
                getattr(command_object, action)()
            except Exception as ex:
                if options.debug:
                    raise
                print(ex)
        else:
            common.print_actions(cmd, actions)
    else:
        common.print_commands(COMMANDS)


if __name__ == '__main__':
    main()
| {
"content_hash": "a3b9f53e16054619d60f65f4fe28723f",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 79,
"avg_line_length": 27.85433070866142,
"alnum_prop": 0.5674911660777385,
"repo_name": "openstack/python-troveclient",
"id": "e3efbae9a8b128d0e546666f4a7e0c19a8d07b70",
"size": "7714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "troveclient/compat/mcli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "904048"
},
{
"name": "Shell",
"bytes": "1432"
}
],
"symlink_target": ""
} |
# Scrapy project identity and spider discovery locations.
BOT_NAME = 'ZhenAi'
SPIDER_MODULES = ['ZhenAi.spiders']
NEWSPIDER_MODULE = 'ZhenAi.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ZhenAi (+http://www.yourdomain.com)'
# Obey robots.txt rules
# robots.txt rules are deliberately ignored for this crawl.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'X-Requested-With': 'XMLHttpRequest',
'Referer': 'http://search.zhenai.com/v2/search/pinterest.do?'
'sex=1&agebegin=18&ageend=-1&workcityprovince=-1&workcitycity=-1'
'&info=&h1=-1&h2=-1&salaryBegin=-1&salaryEnd=-1&occupation=-1&h=-1'
'&c=-1&workcityprovince1=-1&workcitycity1=-1&constellation=-1&animals=-1'
'&stock=-1&belief=-1&lvBegin=-1&lvEnd=-1&condition=66&orderby=hpf&hotIndex=&online=',
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ZhenAi.middlewares.ZhenaiSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'ZhenAi.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# Single enabled pipeline (priority 300); the class name spelling
# 'ZhenAiImagePipline' presumably matches the pipelines module -- verify.
ITEM_PIPELINES = {
    'ZhenAi.pipelines.ZhenAiImagePipline': 300,
}
# Directory where downloaded images are stored (relative to the project).
IMAGES_STORE = 'pic'
# Image expiry in days before re-download.
IMAGES_EXPIRES = 90
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| {
"content_hash": "c92c89d56cc620a27b5d920d71c6c103",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 109,
"avg_line_length": 37.51685393258427,
"alnum_prop": 0.7433363282419886,
"repo_name": "huangchuchuan/Spider",
"id": "34b05e62d88507951bba4841aa9c4c2274ca2078",
"size": "3770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ZhenAiSpider/ZhenAi/ZhenAi/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "84078"
}
],
"symlink_target": ""
} |
"""
Author: Daniel E. Cook
User profile
"""
from flask import render_template, Blueprint, session, flash, redirect, url_for
from base.models import user_ds
from logzero import logger
user_bp = Blueprint('user',
__name__)
@user_bp.route('/profile/<string:username>')
def user(username):
    """Render the profile page for *username*.

    Redirects to the primary page with a flash message when no user is
    logged in (i.e. no 'user' entry in the session).
    """
    logged_in = session.get('user')
    if logged_in is None:
        flash("You must be logged in to view your profile", 'danger')
        return redirect(url_for('primary.primary'))
    template_vars = {'title': username,
                     'user_obj': user_ds(logged_in['name'])}
    return render_template('user.html', **template_vars)
| {
"content_hash": "33fe8569de8c114fb60a6615f0b2f49a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 24.62962962962963,
"alnum_prop": 0.6210526315789474,
"repo_name": "AndersenLab/CeNDR",
"id": "eacb7592eff9e85f62bb4aeb31ef91d279d88b5d",
"size": "711",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "base/views/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "57"
},
{
"name": "CSS",
"bytes": "15097"
},
{
"name": "Dockerfile",
"bytes": "1985"
},
{
"name": "HTML",
"bytes": "211057"
},
{
"name": "Python",
"bytes": "180790"
},
{
"name": "R",
"bytes": "19967"
},
{
"name": "Shell",
"bytes": "4053"
}
],
"symlink_target": ""
} |
"""
GEMPRO
======
"""
import logging
import os
import os.path as op
import shutil
from copy import copy
import pandas as pd
from Bio import SeqIO
from bioservices import KEGG
from bioservices import UniProt
from cobra.core import DictList
from six.moves.urllib.error import HTTPError
from slugify import Slugify
import ssbio.core.modelpro
import ssbio.databases.kegg
import ssbio.databases.pdb
import ssbio.databases.uniprot
import ssbio.protein.sequence.properties.residues
import ssbio.protein.sequence.properties.tmhmm
import ssbio.protein.sequence.utils.fasta
import ssbio.protein.structure.properties.msms
import ssbio.protein.structure.properties.quality
import ssbio.protein.structure.properties.residues
from ssbio import utils
from ssbio.core.genepro import GenePro
from ssbio.core.modelpro import ModelPro
from ssbio.core.object import Object
from ssbio.databases.kegg import KEGGProp
from ssbio.databases.uniprot import UniProtProp
from ssbio.protein.sequence.properties.scratch import SCRATCH
# Use the notebook-aware progress bar when running inside Jupyter.
if utils.is_ipynb():
    from tqdm import tqdm_notebook as tqdm
else:
    from tqdm import tqdm
# Slugifier for filesystem-safe identifiers; keeps '-', '_' and '.'.
custom_slugify = Slugify(safe_chars='-_.')
# Silence noisy HTTP client loggers used by the web-service wrappers.
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
log = logging.getLogger(__name__)
date = utils.Date()
# Shared bioservices clients for UniProt and KEGG lookups.
bs_unip = UniProt()
bs_kegg = KEGG()
class GEMPRO(Object):
"""Generic class to represent all information for a GEM-PRO project.
Initialize the GEM-PRO project with a genome-scale model, a list of genes, or a dict of genes and sequences.
Specify the name of your project, along with the root directory where a folder with that name will be created.
Main methods provided are:
#. Automated mapping of sequence IDs
* With KEGG mapper
* With UniProt mapper
* Allowing manual gene ID --> protein sequence entry
* Allowing manual gene ID --> UniProt ID
#. Consolidating sequence IDs and setting a representative sequence
* Currently these are set based on available PDB IDs
#. Mapping of representative sequence --> structures
* With UniProt --> ranking of PDB structures
* BLAST representative sequence --> PDB database
#. Preparation of files for homology modeling (currently for I-TASSER)
* Mapping to existing models
* Preparation for running I-TASSER
* Parsing I-TASSER runs
#. Running QC/QA on structures and setting a representative structure
* Various cutoffs (mutations, insertions, deletions) can be set to filter structures
#. Automation of protein sequence and structure property calculation
#. Creation of Pandas DataFrame summaries directly from downloaded metadata
Args:
gem_name (str): The name of your GEM or just your project in general. This will be the name of the main folder
that is created in root_dir.
root_dir (str): Path to where the folder named after ``gem_name`` will be created. If not provided, directories
will not be created and output directories need to be specified for some steps.
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
gem (Model): COBRApy Model object
gem_file_path (str): Path to GEM file
gem_file_type (str): GEM model type - ``sbml`` (or ``xml``), ``mat``, or ``json`` formats
genes_list (list): List of gene IDs that you want to map
genes_and_sequences (dict): Dictionary of gene IDs and their amino acid sequence strings
genome_path (str): FASTA file of all protein sequences
write_protein_fasta_files (bool): If individual protein FASTA files should be written out
description (str): Description string of your project
custom_spont_id (str): ID of spontaneous genes in a COBRA model which will be ignored for analysis
"""
def __init__(self, gem_name, root_dir=None, pdb_file_type='mmtf',
             gem=None, gem_file_path=None, gem_file_type=None,
             genes_list=None, genes_and_sequences=None, genome_path=None,
             write_protein_fasta_files=True,
             description=None, custom_spont_id=None):
    """Initialize the GEM-PRO project; see the class docstring for argument details."""
    Object.__init__(self, id=gem_name, description=description)

    self.genes = DictList()
    """DictList: All protein-coding genes in this GEM-PRO project"""
    self.custom_spont_id = custom_spont_id
    """str: ID of spontaneous genes in a COBRA model which will be ignored for analysis"""
    self.pdb_file_type = pdb_file_type
    """str: ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB"""
    self.genome_path = genome_path
    """str: Simple link to the filepath of the FASTA file containing all protein sequences"""
    self.model = None
    """Model: COBRApy model object"""

    # Create directories -- setting root_dir triggers the directory-creation
    # logic in the root_dir property setter
    self._root_dir = None
    if root_dir:
        self.root_dir = root_dir

    # TODO: add some checks for multiple inputs (only allow one!)
    # Exactly one input source is consumed, in this priority order:
    # Load a Model object
    if gem:
        self.load_cobra_model(gem)
    # Or, load a GEM file
    elif gem_file_path and gem_file_type:
        gem = ssbio.core.modelpro.model_loader(gem_file_path=gem_file_path,
                                               gem_file_type=gem_file_type)
        self.load_cobra_model(gem)
    # Or, load a list of gene IDs
    elif genes_list:
        self.add_gene_ids(genes_list)
    # Or, load a dictionary of genes and their sequences
    elif genes_and_sequences:
        self.add_gene_ids(list(genes_and_sequences.keys()))
        self.manual_seq_mapping(genes_and_sequences, write_fasta_files=write_protein_fasta_files)
    # Or, load the provided FASTA file
    elif genome_path:
        genes_and_sequences = ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqrecords(genome_path)
        self.add_gene_ids(list(genes_and_sequences.keys()))
        self.manual_seq_mapping(genes_and_sequences, write_fasta_files=write_protein_fasta_files)
    # If neither a model or genes are input, you can still add IDs with method add_gene_ids later
    else:
        log.warning('No model or genes input')

    log.info('{}: number of genes'.format(len(self.genes)))
@property
def root_dir(self):
    """str: Directory in which the GEM-PRO project folder (named after ``id``; see ``base_dir``) is located."""
    return self._root_dir
@root_dir.setter
def root_dir(self, path):
    """Set (or move) the project root; creates the standard subdirectory layout."""
    # Validate the target folder before changing any state
    if not path:
        raise ValueError('No path specified')
    if not op.exists(path):
        raise ValueError('{}: folder does not exist'.format(path))

    if self._root_dir:
        log.info('Changing root directory of GEM-PRO project "{}" from {} to {}'.format(self.id, self.root_dir, path))
        # When re-pointing an existing project, the named project folder must
        # already exist at the new location -- nothing is copied or moved here
        if not op.exists(op.join(path, self.id)):
            raise IOError('GEM-PRO project "{}" does not exist in folder {}'.format(self.id, path))
    else:
        log.info('Creating GEM-PRO project directory in folder {}'.format(path))

    self._root_dir = path

    # Create the standard subdirectories (make_dir is presumably idempotent -- TODO confirm)
    for d in [self.base_dir, self.model_dir, self.data_dir, self.genes_dir]:  # , self.structures_dir]:
        ssbio.utils.make_dir(d)

    log.info('{}: GEM-PRO project location'.format(self.base_dir))

    # Propagate changes to gene
    if hasattr(self, 'genes'):
        for g in self.genes:
            g.root_dir = self.genes_dir
@property
def base_dir(self):
    """str: GEM-PRO project folder (``root_dir``/``id``), or None when no root directory is set."""
    if not self.root_dir:
        return None
    return op.join(self.root_dir, self.id)
@property
def model_dir(self):
    """str: Directory holding the original GEMs and GEM-related files, or None without a base directory."""
    return op.join(self.base_dir, 'model') if self.base_dir else None
@property
def data_dir(self):
    """str: Directory where all project data are stored, or None without a base directory."""
    return op.join(self.base_dir, 'data') if self.base_dir else None
@property
def genes_dir(self):
    """str: Directory where gene-specific information is stored, or None without a base directory."""
    return op.join(self.base_dir, 'genes') if self.base_dir else None
# @property
# def structures_dir(self):
# """str: Directory where all structures are stored."""
# # XTODO: replace storage of structures in individual protein directories with this to reduce redundancy
# if self.base_dir:
# return op.join(self.base_dir, 'structures')
# else:
# return None
def load_cobra_model(self, model):
    """Load a COBRApy Model object into the GEM-PRO project.

    Wraps the model in a ``ModelPro``, points each gene at the project's genes
    directory and preferred PDB file type, and aliases this project's ``genes``
    to the model's gene list.

    Args:
        model (Model): COBRApy ``Model`` object

    """
    self.model = ModelPro(model)

    # Point each gene at the project layout and the preferred PDB file type
    for g in self.model.genes:
        if self.genes_dir:
            g.root_dir = self.genes_dir
        g.protein.pdb_file_type = self.pdb_file_type

    # self.genes is the same DictList object as the model's, so later
    # additions through either attribute stay in sync
    self.genes = self.model.genes

    log.info('{}: loaded model'.format(model.id))
    log.info('{}: number of reactions'.format(len(self.model.reactions)))
    log.info('{}: number of reactions linked to a gene'.format(ssbio.core.modelpro.true_num_reactions(self.model)))
    log.info('{}: number of genes (excluding spontaneous)'.format(ssbio.core.modelpro.true_num_genes(self.model,
                                                                                                    custom_spont_id=self.custom_spont_id)))
    log.info('{}: number of metabolites'.format(len(self.model.metabolites)))
    log.warning('IMPORTANT: All Gene objects have been transformed into GenePro '
                'objects, and will be for any new ones')
@property
def genes_with_structures(self):
    """DictList: Genes whose protein has at least one structure mapped."""
    return DictList(gene for gene in self.genes if gene.protein.num_structures > 0)
@property
def genes_with_experimental_structures(self):
    """DictList: Genes whose protein has at least one experimental structure."""
    return DictList(gene for gene in self.genes_with_structures
                    if gene.protein.num_structures_experimental > 0)
@property
def genes_with_homology_models(self):
    """DictList: Genes whose protein has at least one homology model."""
    return DictList(gene for gene in self.genes_with_structures
                    if gene.protein.num_structures_homology > 0)
@property
def genes_with_a_representative_sequence(self):
    """DictList: Genes that have a representative sequence set."""
    return DictList(gene for gene in self.genes if gene.protein.representative_sequence)
@property
def genes_with_a_representative_structure(self):
    """DictList: Genes with a representative structure that also has a structure file."""
    return DictList(gene for gene in self.genes
                    if gene.protein.representative_structure
                    and gene.protein.representative_structure.structure_file)
@property
def functional_genes(self):
    """DictList: Genes flagged as functional."""
    return DictList(gene for gene in self.genes if gene.functional)
# @property
# def genes(self):
# """DictList: All genes excluding spontaneous ones."""
# return ssbio.core.modelpro.filter_out_spontaneous_genes(self._genes, custom_spont_id=self.custom_spont_id)
# @genes.setter
# def genes(self, genes_list):
# """Set the genes attribute to be a DictList of GenePro objects.
#
# A "protein" attribute will be added to each Gene.
#
# Args:
# genes_list: DictList of COBRApy Gene objects, or list of gene IDs
#
# """
#
# if not isinstance(genes_list, DictList):
# tmp_list = []
# for x in list(set(genes_list)):
# x = str(x)
# new_gene = GenePro(id=x, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)
# tmp_list.append(new_gene)
# self._genes = DictList(tmp_list)
# else:
# self._genes = genes_list
def add_gene_ids(self, genes_list):
    """Add gene IDs manually into the GEM-PRO project.

    IDs already present are skipped; new genes are appended to the model's
    gene list when a model is loaded, otherwise to the project's own list.

    Args:
        genes_list (list): List of gene IDs as strings.

    """
    num_genes_before = len(self.genes)

    for gene_id in set(genes_list):
        if self.genes.has_id(gene_id):
            continue
        new_gene = GenePro(id=gene_id, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)
        # self.genes aliases self.model.genes when a model is loaded
        target = self.model.genes if self.model else self.genes
        target.append(new_gene)

    log.info('Added {} genes to GEM-PRO project'.format(len(self.genes) - num_genes_before))
####################################################################################################################
### SEQUENCE RELATED METHODS ###
def kegg_mapping_and_metadata(self, kegg_organism_code, custom_gene_mapping=None, outdir=None,
                              set_as_representative=False, force_rerun=False):
    """Map all genes in the model to KEGG IDs using the KEGG service.

    Steps:
        1. Download all metadata and sequence files in the sequences directory
        2. Creates a KEGGProp object in the protein.sequences attribute
        3. Returns a Pandas DataFrame of mapping results

    Args:
        kegg_organism_code (str): The three letter KEGG code of your organism
        custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map,
            custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones.
            Genes without an entry fall back to their model gene ID.
        outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
            were not created initially
        set_as_representative (bool): If mapped KEGG IDs should be set as representative sequences
        force_rerun (bool): If you want to overwrite any existing mappings and files

    """
    # First map all of the organism's KEGG genes to UniProt
    kegg_to_uniprot = ssbio.databases.kegg.map_kegg_all_genes(organism_code=kegg_organism_code, target_db='uniprot')

    successfully_mapped_counter = 0
    for g in tqdm(self.genes):
        # Fall back to the model gene ID when no custom mapping entry exists
        # (consistent with uniprot_mapping_and_metadata; previously a partial
        # mapping dict raised KeyError here)
        if custom_gene_mapping and g.id in custom_gene_mapping:
            kegg_g = custom_gene_mapping[g.id]
        else:
            kegg_g = g.id

        if kegg_g not in kegg_to_uniprot:
            log.debug('{}: unable to map to KEGG'.format(g.id))
            continue

        # Download both FASTA and KEGG metadata files
        kegg_prop = g.protein.load_kegg(kegg_id=kegg_g, kegg_organism_code=kegg_organism_code,
                                        download=True, outdir=outdir, set_as_representative=set_as_representative,
                                        force_rerun=force_rerun)

        # Update potentially old UniProt ID -- membership in kegg_to_uniprot is
        # guaranteed by the continue above
        kegg_prop.uniprot = kegg_to_uniprot[kegg_g]
        if g.protein.representative_sequence:
            if g.protein.representative_sequence.kegg == kegg_prop.kegg:
                g.protein.representative_sequence.uniprot = kegg_to_uniprot[kegg_g]

        # Keep track of missing mappings - missing is defined by no available sequence
        if kegg_prop.sequence_file:
            successfully_mapped_counter += 1

        log.debug('{}: loaded KEGG information for gene'.format(g.id))

    log.info('{}/{}: number of genes mapped to KEGG'.format(successfully_mapped_counter, len(self.genes)))
    log.info('Completed ID mapping --> KEGG. See the "df_kegg_metadata" attribute for a summary dataframe.')
def kegg_mapping_and_metadata_parallelize(self, sc, kegg_organism_code, custom_gene_mapping=None, outdir=None,
                                          set_as_representative=False, force_rerun=False):
    """Map all genes in the model to KEGG IDs using the KEGG service, parallelized with Spark.

    Steps:
        1. Download all metadata and sequence files in the sequences directory
        2. Creates a KEGGProp object in the protein.sequences attribute
        3. Returns a Pandas DataFrame of mapping results

    Args:
        sc (SparkContext): Spark Context to parallelize this function
        kegg_organism_code (str): The three letter KEGG code of your organism
        custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map,
            custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones.
            Dictionary keys must match model gene IDs.
        outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
            were not created initially
        set_as_representative (bool): If mapped KEGG IDs should be set as representative sequences
        force_rerun (bool): If you want to overwrite any existing mappings and files

    """
    # First map all of the organism's KEGG genes to UniProt
    kegg_to_uniprot = ssbio.databases.kegg.map_kegg_all_genes(organism_code=kegg_organism_code, target_db='uniprot')

    # Parallelize the genes list
    genes_rdd = sc.parallelize(self.genes)

    # Write a sub-function to carry out the bulk of the original function.
    # It closes over kegg_to_uniprot and the keyword arguments; Spark serializes
    # the closure to the workers, so each worker mutates a COPY of the gene --
    # results are merged back below via copy_modified_gene.
    def gp_kegg_sc(g):
        # NOTE(review): unlike uniprot_mapping_and_metadata, there is no check
        # that g.id is a key of custom_gene_mapping -- a partial mapping dict
        # raises KeyError here; confirm whether a fallback to g.id is intended
        if custom_gene_mapping:
            kegg_g = custom_gene_mapping[g.id]
        else:
            kegg_g = g.id

        # Download both FASTA and KEGG metadata files
        kegg_prop = g.protein.load_kegg(kegg_id=kegg_g, kegg_organism_code=kegg_organism_code,
                                        download=True, outdir=outdir,
                                        set_as_representative=set_as_representative,
                                        force_rerun=force_rerun)

        # Update potentially old UniProt ID
        if kegg_g in kegg_to_uniprot.keys():
            kegg_prop.uniprot = kegg_to_uniprot[kegg_g]
            if g.protein.representative_sequence:
                if g.protein.representative_sequence.kegg == kegg_prop.kegg:
                    g.protein.representative_sequence.uniprot = kegg_to_uniprot[kegg_g]

        # Tracker for if it mapped successfully to KEGG
        if kegg_prop.sequence_file:
            success = True
        else:
            success = False

        return g, success

    # Run a map operation to execute the function on all items in the RDD
    result = genes_rdd.map(gp_kegg_sc).collect()

    # Copy the results over to the GEM-PRO object's genes using the GenePro function "copy_modified_gene"
    # Also count how many genes mapped to KEGG
    successfully_mapped_counter = 0
    for modified_g, success in result:
        original_gene = self.genes.get_by_id(modified_g.id)
        original_gene.copy_modified_gene(modified_g)
        if success:
            successfully_mapped_counter += 1

    log.info('{}/{}: number of genes mapped to KEGG'.format(successfully_mapped_counter, len(self.genes)))
    log.info('Completed ID mapping --> KEGG. See the "df_kegg_metadata" attribute for a summary dataframe.')
@property
def df_kegg_metadata(self):
    """DataFrame: Pandas DataFrame of KEGG metadata per protein, indexed by gene ID."""
    df_cols = ['gene', 'kegg', 'refseq', 'uniprot', 'num_pdbs', 'pdbs', 'seq_len', 'sequence_file', 'metadata_file']

    # One row per stored KEGGProp, tagged with its gene ID
    rows = []
    for gene in self.genes:
        for kegg_prop in gene.protein.filter_sequences(KEGGProp):
            row = kegg_prop.get_dict(df_format=True, only_attributes=df_cols)
            row['gene'] = gene.id
            rows.append(row)

    df = pd.DataFrame.from_records(rows, columns=df_cols).set_index('gene')
    if df.empty:
        log.warning('Empty dataframe')
        return df
    return ssbio.utils.clean_df(df)
@property
def missing_kegg_mapping(self):
    """list: Gene IDs with no KEGG sequence file available ("missing" mapping)."""
    missing = []
    for gene in self.genes:
        kegg_props = gene.protein.filter_sequences(KEGGProp)
        if not any(k.sequence_file for k in kegg_props):
            missing.append(gene.id)
    return list(set(missing))
def uniprot_mapping_and_metadata(self, model_gene_source, custom_gene_mapping=None, outdir=None,
                                 set_as_representative=False, force_rerun=False):
    """Map all genes in the model to UniProt IDs using the UniProt mapping service.
    Also download all metadata and sequences.

    Args:
        model_gene_source (str): the database source of your model gene IDs.
            See: http://www.uniprot.org/help/api_idmapping
            Common model gene sources are:

            * Ensembl Genomes - ``ENSEMBLGENOME_ID`` (i.e. E. coli b-numbers)
            * Entrez Gene (GeneID) - ``P_ENTREZGENEID``
            * RefSeq Protein - ``P_REFSEQ_AC``

        custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map,
            custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones.
            Dictionary keys must match model genes.
        outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
            were not created initially
        set_as_representative (bool): If mapped UniProt IDs should be set as representative sequences
        force_rerun (bool): If you want to overwrite any existing mappings and files

    """
    # Allow model gene --> custom ID mapping ({'TM_1012':'TM1012'})
    if custom_gene_mapping:
        genes_to_map = list(custom_gene_mapping.values())
    else:
        genes_to_map = [x.id for x in self.genes]

    # Map all IDs first to available UniProts (one batched web request)
    genes_to_uniprots = bs_unip.mapping(fr=model_gene_source, to='ACC', query=genes_to_map)

    successfully_mapped_counter = 0
    for g in tqdm(self.genes):
        # Genes absent from the custom mapping fall back to their model ID
        if custom_gene_mapping and g.id in custom_gene_mapping.keys():
            uniprot_gene = custom_gene_mapping[g.id]
        else:
            uniprot_gene = g.id

        if uniprot_gene not in genes_to_uniprots:
            log.debug('{}: unable to map to UniProt'.format(g.id))
            continue

        # One gene ID can map to several UniProt accessions; load each one
        for mapped_uniprot in genes_to_uniprots[uniprot_gene]:
            try:
                uniprot_prop = g.protein.load_uniprot(uniprot_id=mapped_uniprot, download=True, outdir=outdir,
                                                      set_as_representative=set_as_representative,
                                                      force_rerun=force_rerun)
            except HTTPError as e:
                # Best-effort: log the failure and try the next accession
                log.error('{}, {}: unable to complete web request'.format(g.id, mapped_uniprot))
                print(e)
                continue

            # NOTE(review): incremented once per successfully loaded accession, so a
            # gene with multiple UniProt hits can be counted more than once in the
            # "genes mapped" total below -- confirm intended behavior
            if uniprot_prop.sequence_file or uniprot_prop.metadata_file:
                successfully_mapped_counter += 1

    log.info('{}/{}: number of genes mapped to UniProt'.format(successfully_mapped_counter, len(self.genes)))
    log.info('Completed ID mapping --> UniProt. See the "df_uniprot_metadata" attribute for a summary dataframe.')
def manual_uniprot_mapping(self, gene_to_uniprot_dict, outdir=None, set_as_representative=True):
    """Read a manual dictionary of model gene IDs --> UniProt IDs. By default sets them as representative.

    This allows for mapping of the missing genes, or overriding of automatic mappings.

    Input a dictionary of::

        {
            <gene_id1>: <uniprot_id1>,
            <gene_id2>: <uniprot_id2>,
        }

    Args:
        gene_to_uniprot_dict: Dictionary of mappings as shown above
        outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
            were not created initially
        set_as_representative (bool): If mapped UniProt IDs should be set as representative sequences

    """
    for gene_key, uniprot_id in tqdm(gene_to_uniprot_dict.items()):
        gene_key = str(gene_key)
        gene = self.genes.get_by_id(gene_key)
        try:
            gene.protein.load_uniprot(uniprot_id=uniprot_id,
                                      outdir=outdir, download=True,
                                      set_as_representative=set_as_representative)
        except HTTPError as e:
            # Best-effort: log the failed request and continue with the next gene
            log.error('{}, {}: unable to complete web request'.format(gene_key, uniprot_id))
            print(e)
            continue

    log.info('Completed manual ID mapping --> UniProt. See the "df_uniprot_metadata" attribute for a summary dataframe.')
@property
def df_uniprot_metadata(self):
    """DataFrame: Pandas DataFrame of UniProt metadata per protein, indexed by gene ID."""
    df_cols = ['gene', 'uniprot', 'reviewed', 'gene_name', 'kegg', 'refseq', 'num_pdbs', 'pdbs', 'ec_number',
               'pfam', 'seq_len', 'description', 'entry_date', 'entry_version', 'seq_date', 'seq_version',
               'sequence_file', 'metadata_file']

    # One row per stored UniProtProp, tagged with its gene ID
    rows = []
    for gene in self.genes:
        for uniprot_prop in gene.protein.filter_sequences(UniProtProp):
            row = uniprot_prop.get_dict(df_format=True, only_attributes=df_cols)
            row['gene'] = gene.id
            rows.append(row)

    df = pd.DataFrame.from_records(rows, columns=df_cols).set_index('gene')
    if df.empty:
        log.warning('Empty dataframe')
        return df
    return ssbio.utils.clean_df(df)
@property
def missing_uniprot_mapping(self):
    """list: Gene IDs with no UniProt sequence or metadata file available."""
    missing = []
    for gene in self.genes:
        uniprot_props = gene.protein.filter_sequences(UniProtProp)
        if not any(u.sequence_file or u.metadata_file for u in uniprot_props):
            missing.append(gene.id)
    return list(set(missing))
# TODO: should also have a seq --> uniprot id function (has to be 100% match) (also needs organism)
def manual_seq_mapping(self, gene_to_seq_dict, outdir=None, write_fasta_files=True, set_as_representative=True):
    """Read a manual input dictionary of model gene IDs --> protein sequences. By default sets them as representative.

    Args:
        gene_to_seq_dict (dict): Mapping of gene IDs to their protein sequence strings
        outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
            were not created initially
        write_fasta_files (bool): If individual protein FASTA files should be written out
        set_as_representative (bool): If mapped sequences should be set as representative

    """
    outdir_was_given = bool(outdir)

    # Save the sequence information in individual FASTA files
    for gene_key, seq in gene_to_seq_dict.items():
        gene = self.genes.get_by_id(str(gene_key))

        # When no explicit outdir was given, default to each gene's own sequence directory
        if not outdir_was_given and write_fasta_files:
            outdir = gene.protein.sequence_dir
        if not outdir:
            raise ValueError('Output directory must be specified')

        gene.protein.load_manual_sequence(ident=gene_key, seq=seq, outdir=outdir,
                                          write_fasta_file=write_fasta_files,
                                          set_as_representative=set_as_representative)
        log.debug('{}: loaded manually defined sequence information'.format(gene_key))

    log.info('Loaded in {} sequences'.format(len(gene_to_seq_dict)))
def set_representative_sequence(self, force_rerun=False):
    """Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative sequence.

    Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
    except when KEGG mappings have PDBs associated with them and UniProt doesn't.

    Args:
        force_rerun (bool): Set to True to recheck stored sequences

    """
    # TODO: rethink use of multiple database sources - may lead to inconsistency with genome sources
    for g in tqdm(self.genes):
        # Consolidation logic lives on each gene's Protein object; the previous
        # per-gene success counter was accumulated here but never used, so it
        # has been removed (the summary below counts via the project property)
        g.protein.set_representative_sequence(force_rerun=force_rerun)

    log.info('{}/{}: number of genes with a representative sequence'.format(len(self.genes_with_a_representative_sequence),
                                                                            len(self.genes)))
    log.info('See the "df_representative_sequences" attribute for a summary dataframe.')
@property
def df_representative_sequences(self):
    """DataFrame: Pandas DataFrame of representative sequence information per protein, indexed by gene ID."""
    df_cols = ['gene', 'uniprot', 'kegg', 'num_pdbs', 'pdbs', 'seq_len', 'sequence_file', 'metadata_file']

    rows = []
    for gene in self.genes_with_a_representative_sequence:
        row = gene.protein.representative_sequence.get_dict(df_format=True, only_attributes=df_cols)
        row['gene'] = gene.id
        rows.append(row)

    df = pd.DataFrame.from_records(rows, columns=df_cols).set_index('gene')
    if df.empty:
        log.warning('Empty dataframe')
        return df
    return ssbio.utils.clean_df(df)
@property
def missing_representative_sequence(self):
    """list: Gene IDs that have no representative sequence set."""
    have_repseq = self.genes_with_a_representative_sequence
    return [gene.id for gene in self.genes if not have_repseq.has_id(gene.id)]
def write_representative_sequences_file(self, outname, outdir=None, set_ids_from_model=True):
    """Write all the model's sequences as a single FASTA file. By default, sets IDs to model gene IDs.

    Also stores the path of the written file in ``self.genome_path``.

    Args:
        outname (str): Name of the output FASTA file without the extension
        outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
            were not created initially
        set_ids_from_model (bool): If the gene ID source should be the model gene IDs, not the original sequence ID

    """
    outdir = outdir or self.data_dir
    if not outdir:
        raise ValueError('Output directory must be specified')

    outfile = op.join(outdir, outname + '.faa')

    records = []
    for gene in self.genes_with_a_representative_sequence:
        # Copy so that overwriting the record ID does not touch the stored sequence
        record = copy(gene.protein.representative_sequence)
        if set_ids_from_model:
            record.id = gene.id
        records.append(record)

    SeqIO.write(records, outfile, "fasta")

    log.info('{}: wrote all representative sequences to file'.format(outfile))
    self.genome_path = outfile
    return self.genome_path
def get_sequence_properties(self, clean_seq=False, representatives_only=True):
    """Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of all protein sequences.
    Results are stored in the protein's respective SeqProp objects at ``.annotations``

    Args:
        clean_seq (bool): Passed through to each protein's ``get_sequence_properties`` --
            presumably cleans non-standard residues before analysis; TODO confirm
        representatives_only (bool): If analysis should only be run on the representative sequences

    """
    for g in tqdm(self.genes):
        g.protein.get_sequence_properties(clean_seq=clean_seq, representative_only=representatives_only)
def get_sequence_sliding_window_properties(self, scale, window, representatives_only=True):
    """Compute sliding-window sequence properties for all proteins.
    Results are stored in the protein's respective SeqProp objects at ``.annotations``

    Args:
        scale: Property scale passed through to each protein's
            ``get_sequence_sliding_window_properties`` -- TODO confirm expected values
        window: Sliding window size passed through to the per-protein calculation
        representatives_only (bool): If analysis should only be run on the representative sequences

    """
    for g in tqdm(self.genes):
        g.protein.get_sequence_sliding_window_properties(scale=scale, window=window,
                                                         representative_only=representatives_only)
def get_scratch_predictions(self, path_to_scratch, results_dir, scratch_basename='scratch', num_cores=1,
                            exposed_buried_cutoff=25, custom_gene_mapping=None):
    """Run and parse ``SCRATCH`` results to predict secondary structure and solvent accessibility.
    Annotations are stored in the protein's representative sequence at:

        * ``.annotations``
        * ``.letter_annotations``

    Args:
        path_to_scratch (str): Path to SCRATCH executable
        results_dir (str): Path to SCRATCH results folder, which will have the files (scratch.ss, scratch.ss8,
            scratch.acc, scratch.acc20)
        scratch_basename (str): Basename of the SCRATCH results ('scratch' is default)
        num_cores (int): Number of cores to use to parallelize SCRATCH run
        exposed_buried_cutoff (int): Cutoff of exposed/buried for the acc20 predictions
        custom_gene_mapping (dict): Default parsing of SCRATCH output files is to look for the model gene IDs. If
            your output files contain IDs which differ from the model gene IDs, use this dictionary to map model
            gene IDs to result file IDs. Dictionary keys must match model genes.

    """
    if not self.genome_path:
        # Write all sequences as one file
        # NOTE(review): the call sets self.genome_path itself; this return value is unused
        all_seqs = self.write_representative_sequences_file(outname=self.id)
    # Runs SCRATCH or loads existing results in results_dir
    scratch = SCRATCH(project_name=scratch_basename, seq_file=self.genome_path)
    scratch.run_scratch(path_to_scratch=path_to_scratch, num_cores=num_cores, outdir=results_dir)

    # Parse all summary and per-residue results once, ahead of the per-gene loop
    sspro_summary = scratch.sspro_summary()
    sspro8_summary = scratch.sspro8_summary()
    sspro_results = scratch.sspro_results()
    sspro8_results = scratch.sspro8_results()
    accpro_summary = scratch.accpro_summary()
    accpro20_summary = scratch.accpro20_summary(exposed_buried_cutoff)
    accpro_results = scratch.accpro_results()
    accpro20_results = scratch.accpro20_results()

    counter = 0

    # Adding the scratch annotations to the representative_sequences letter_annotations
    for g in tqdm(self.genes_with_a_representative_sequence):
        if custom_gene_mapping:
            g_id = custom_gene_mapping[g.id]
        else:
            g_id = g.id

        if g_id in sspro_summary:
            # Secondary structure
            g.protein.representative_sequence.annotations.update(sspro_summary[g_id])
            g.protein.representative_sequence.annotations.update(sspro8_summary[g_id])
            try:
                g.protein.representative_sequence.letter_annotations['SS-sspro'] = sspro_results[g_id]
                g.protein.representative_sequence.letter_annotations['SS-sspro8'] = sspro8_results[g_id]
            except TypeError:
                # Per-residue annotations must match the sequence length exactly
                log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
                          'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))

            # Solvent accessibility
            g.protein.representative_sequence.annotations.update(accpro_summary[g_id])
            g.protein.representative_sequence.annotations.update(accpro20_summary[g_id])
            try:
                g.protein.representative_sequence.letter_annotations['RSA-accpro'] = accpro_results[g_id]
                g.protein.representative_sequence.letter_annotations['RSA-accpro20'] = accpro20_results[g_id]
            except TypeError:
                log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
                          'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))

            counter += 1
        else:
            log.error('{}: missing SCRATCH results'.format(g.id))

    log.info('{}/{}: number of genes with SCRATCH predictions loaded'.format(counter, len(self.genes)))
def get_tmhmm_predictions(self, tmhmm_results, custom_gene_mapping=None):
    """Parse TMHMM results and store in the representative sequences.

    This is a basic function to parse pre-run TMHMM results. Run TMHMM from the
    web service (http://www.cbs.dtu.dk/services/TMHMM/) by doing the following:

    1. Write all representative sequences in the GEM-PRO using the function ``write_representative_sequences_file``
    2. Upload the file to http://www.cbs.dtu.dk/services/TMHMM/ and choose "Extensive, no graphics" as the output
    3. Copy and paste the results (ignoring the top header and above "HELP with output formats") into a file and save it
    4. Run this function on that file

    Args:
        tmhmm_results (str): Path to TMHMM results (long format)
        custom_gene_mapping (dict): Default parsing of TMHMM output is to look for the model gene IDs. If
            your output file contains IDs which differ from the model gene IDs, use this dictionary to map model
            gene IDs to result file IDs. Dictionary keys must match model genes.

    """
    # TODO: refactor to Protein class?
    tmhmm_dict = ssbio.protein.sequence.properties.tmhmm.parse_tmhmm_long(tmhmm_results)

    counter = 0
    for g in tqdm(self.genes_with_a_representative_sequence):
        if custom_gene_mapping:
            g_id = custom_gene_mapping[g.id]
        else:
            g_id = g.id

        if g_id not in tmhmm_dict:
            log.error("{}: missing TMHMM results".format(g.id))
            continue

        log.debug('{}: loading TMHMM results'.format(g.id))
        if not tmhmm_dict[g_id]:
            # BUGFIX: previously this branch only logged and then fell through to
            # index the empty result, raising TypeError/KeyError -- skip the gene
            log.error("{}: missing TMHMM results".format(g.id))
            continue

        g.protein.representative_sequence.annotations['num_tm_helix-tmhmm'] = tmhmm_dict[g_id]['num_tm_helices']
        try:
            g.protein.representative_sequence.letter_annotations['TM-tmhmm'] = tmhmm_dict[g_id]['sequence']
            counter += 1
        except TypeError:
            # Per-residue annotations must match the sequence length exactly
            log.error('Gene {}, SeqProp {}: sequence length mismatch between TMHMM results and representative '
                      'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))

    log.info('{}/{}: number of genes with TMHMM predictions loaded'.format(counter, len(self.genes)))
### END SEQUENCE RELATED METHODS ###
####################################################################################################################
####################################################################################################################
### STRUCTURE RELATED METHODS ###
def blast_seqs_to_pdb(self, seq_ident_cutoff=0, evalue=0.0001, all_genes=False, display_link=False,
                      outdir=None, force_rerun=False):
    """BLAST each representative protein sequence to the PDB. Saves raw BLAST results (XML files).

    Args:
        seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form)
        evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal,
            0.0001 is stringent (default).
        all_genes (bool): If all genes should be BLASTed, or only those without any structures currently mapped
        display_link (bool, optional): Set to True if links to the HTML results should be displayed
        outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
            were not created initially
        force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False

    """
    counter = 0
    for g in tqdm(self.genes_with_a_representative_sequence):
        # If all_genes=False, BLAST only genes without a uniprot -> pdb mapping
        # (force_rerun=True also overrides the skip and re-BLASTs everything)
        if g.protein.num_structures_experimental > 0 and not all_genes and not force_rerun:
            log.debug('{}: skipping BLAST, {} experimental structures already mapped '
                      'and all_genes flag is False'.format(g.id,
                                                           g.protein.num_structures_experimental))
            continue

        # BLAST the sequence to the PDB
        new_pdbs = g.protein.blast_representative_sequence_to_pdb(seq_ident_cutoff=seq_ident_cutoff,
                                                                  evalue=evalue,
                                                                  display_link=display_link,
                                                                  outdir=outdir,
                                                                  force_rerun=force_rerun)
        if new_pdbs:
            counter += 1
            log.debug('{}: {} PDBs BLASTed'.format(g.id, len(new_pdbs)))
        else:
            log.debug('{}: no BLAST results'.format(g.id))

    log.info('Completed sequence --> PDB BLAST. See the "df_pdb_blast" attribute for a summary dataframe.')
    log.info('{}: number of genes with additional structures added from BLAST'.format(counter))
@property
def df_pdb_blast(self):
    """DataFrame: Pandas DataFrame of PDB BLAST results per gene, indexed by gene ID."""
    # Collect per-gene frames and concatenate once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0
    frames = []
    for g in self.genes_with_experimental_structures:
        protein_df = g.protein.df_pdb_blast.copy().reset_index()
        if not protein_df.empty:
            protein_df['gene'] = g.id
            frames.append(protein_df)

    # pd.concat raises on an empty list, so fall back to an empty frame
    df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

    if df.empty:
        log.warning('Empty dataframe')
        return df
    else:
        return ssbio.utils.clean_df(df.set_index('gene'))
def map_uniprot_to_pdb(self, seq_ident_cutoff=0.0, outdir=None, force_rerun=False):
    """Map all representative sequences' UniProt ID to PDB IDs using the PDBe "Best Structures" API.

    Will save a JSON file of the results to each protein's ``sequences`` folder.

    The "Best structures" API is available at https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
    The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and,
    if the same, resolution.

    Args:
        seq_ident_cutoff (float): Sequence identity cutoff in decimal form
        outdir (str): Output directory to cache JSON results of search
        force_rerun (bool): Force re-downloading of JSON results if they already exist

    Returns:
        list: A rank-ordered list of PDBProp objects that map to the UniProt ID

    """
    # First get all UniProt IDs and check if they have PDBs
    all_representative_uniprots = []
    for g in self.genes_with_a_representative_sequence:
        uniprot_id = g.protein.representative_sequence.uniprot
        if uniprot_id:
            # TODO: add warning or something for isoform ids?
            # Isoform accessions look like "P12345-2"; strip to the canonical accession
            if '-' in uniprot_id:
                uniprot_id = uniprot_id.split('-')[0]
            all_representative_uniprots.append(uniprot_id)
    log.info('Mapping UniProt IDs --> PDB IDs...')
    # One bulk query against the UniProt mapping service for all accessions at once
    uniprots_to_pdbs = bs_unip.mapping(fr='ACC', to='PDB_ID', query=all_representative_uniprots)
    counter = 0
    # Now run the best_structures API for all genes
    for g in tqdm(self.genes_with_a_representative_sequence):
        uniprot_id = g.protein.representative_sequence.uniprot
        if uniprot_id:
            if '-' in uniprot_id:
                uniprot_id = uniprot_id.split('-')[0]
            # Only hit the per-gene best_structures API when the bulk mapping found a PDB
            if uniprot_id in uniprots_to_pdbs:
                best_structures = g.protein.map_uniprot_to_pdb(seq_ident_cutoff=seq_ident_cutoff, outdir=outdir, force_rerun=force_rerun)
                if best_structures:
                    counter += 1
                    log.debug('{}: {} PDBs mapped'.format(g.id, len(best_structures)))
            else:
                log.debug('{}, {}: no PDBs available'.format(g.id, uniprot_id))
    # NOTE(review): counter is tallied but not reported; the summary logs use
    # genes_with_experimental_structures instead
    log.info('{}/{}: number of genes with at least one experimental structure'.format(len(self.genes_with_experimental_structures),
                                                                                      len(self.genes)))
    log.info('Completed UniProt --> best PDB mapping. See the "df_pdb_ranking" attribute for a summary dataframe.')
@property
def df_pdb_ranking(self):
    """DataFrame: Get a dataframe of UniProt -> best structure in PDB results, indexed by gene ID.

    Returns an empty DataFrame (with a warning) if no gene has ranking results.
    """
    # Build a list of per-protein frames and concatenate once: DataFrame.append
    # was removed in pandas 2.0 and repeated appends are quadratic.
    pre_df = []
    for g in self.genes_with_experimental_structures:
        protein_df = g.protein.df_pdb_ranking.copy().reset_index()
        if not protein_df.empty:
            protein_df['gene'] = g.id
            pre_df.append(protein_df)
    if not pre_df:
        log.warning('Empty dataframe')
        return pd.DataFrame()
    df = pd.concat(pre_df, ignore_index=True)
    return ssbio.utils.clean_df(df.set_index('gene'))
@property
def missing_pdb_structures(self):
    """list: List of genes with no mapping to any experimental PDB structure."""
    mapped = self.genes_with_experimental_structures
    return [gene.id for gene in self.genes if not mapped.has_id(gene.id)]
def get_manual_homology_models(self, input_dict, outdir=None, clean=True, force_rerun=False):
    """Copy homology models to the GEM-PRO project.

    Requires an input of a dictionary formatted like so::

        {
            model_gene: {
                homology_model_id1: {
                    'model_file': '/path/to/homology/model.pdb',
                    'file_type': 'pdb',
                    'additional_info': info_value
                },
                homology_model_id2: {
                    'model_file': '/path/to/homology/model.pdb',
                    'file_type': 'pdb'
                }
            }
        }

    Args:
        input_dict (dict): Dictionary of dictionaries of gene names to homology model IDs and other information
        outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
            were not created initially
        clean (bool): If homology files should be cleaned and saved as a new PDB file
        force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory

    """
    # Remember whether a global outdir was given; otherwise fall back to each
    # gene's own structure directory inside the loop
    if outdir:
        outdir_set = True
    else:
        outdir_set = False
    counter = 0
    for g in tqdm(self.genes):
        if g.id not in input_dict:
            continue
        if not outdir_set:
            outdir = g.protein.structure_dir
            if not outdir:
                raise ValueError('Output directory must be specified')
        for hid, hdict in input_dict[g.id].items():
            if 'model_file' not in hdict or 'file_type' not in hdict:
                raise KeyError('"model_file" and "file_type" must be keys in the manual input dictionary.')
            # Register the model with the protein as a non-experimental structure
            new_homology = g.protein.load_pdb(pdb_id=hid, pdb_file=hdict['model_file'],
                                              file_type=hdict['file_type'], is_experimental=False)
            if clean:
                new_homology.load_structure_path(new_homology.clean_structure(outdir=outdir, force_rerun=force_rerun),
                                                 hdict['file_type'])
            else:
                copy_to = op.join(outdir, op.basename(hdict['model_file']))
                if ssbio.utils.force_rerun(force_rerun, copy_to):
                    # Just copy the file to the structure directory and store the file name
                    log.debug('{}: copying model from original directory to GEM-PRO directory'.format(op.basename(hdict['model_file'])))
                    shutil.copy2(hdict['model_file'], outdir)
                    new_homology.load_structure_path(copy_to, hdict['file_type'])
                else:
                    log.debug('{}: homology model already copied to directory'.format(copy_to))
                    new_homology.load_structure_path(copy_to, hdict['file_type'])
            # TODO: need to better handle other info in the provided dictionary, if any
            new_homology.update(hdict)
            log.debug('{}: updated homology model information and copied model file.'.format(g.id))
        counter += 1
    log.info('Updated homology model information for {} genes.'.format(counter))
def get_itasser_models(self, homology_raw_dir, custom_itasser_name_mapping=None, outdir=None, force_rerun=False):
    """Copy generated I-TASSER models from a directory to the GEM-PRO directory.

    Args:
        homology_raw_dir (str): Root directory of I-TASSER folders.
        custom_itasser_name_mapping (dict): Use this if your I-TASSER folder names differ from your model gene names.
            Input a dict of {model_gene: ITASSER_folder}.
        outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
            were not created initially
        force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory

    """
    counter = 0
    for g in tqdm(self.genes):
        # Prefer the custom folder name if one was supplied and it actually exists
        if custom_itasser_name_mapping and g.id in custom_itasser_name_mapping:
            hom_id = custom_itasser_name_mapping[g.id]
            if not op.exists(op.join(homology_raw_dir, hom_id)):
                hom_id = g.id
        else:
            hom_id = g.id
        # The name of the actual pdb file will be $GENEID_model1.pdb
        new_itasser_name = hom_id + '_model1'
        orig_itasser_dir = op.join(homology_raw_dir, hom_id)
        try:
            itasser_prop = g.protein.load_itasser_folder(ident=hom_id, itasser_folder=orig_itasser_dir,
                                                         organize=True, outdir=outdir,
                                                         organize_name=new_itasser_name,
                                                         force_rerun=force_rerun)
        except OSError:
            # The I-TASSER folder itself was missing
            log.debug('{}: homology model folder unavailable'.format(g.id))
            continue
        except IOError:
            # The folder exists but the model file could not be read
            log.debug('{}: homology model unavailable'.format(g.id))
            continue
        if itasser_prop.structure_file:
            counter += 1
        else:
            log.debug('{}: homology model file unavailable, perhaps modelling did not finish'.format(g.id))
    log.info('Completed copying of {} I-TASSER models to GEM-PRO directory. See the "df_homology_models" attribute for a summary dataframe.'.format(counter))
@property
def df_homology_models(self):
    """DataFrame: Get a dataframe of I-TASSER homology model results, indexed by gene ID.

    Returns an empty DataFrame (with a warning) if no gene has homology models.
    """
    # Collect per-protein frames and concatenate once at the end:
    # DataFrame.append was removed in pandas 2.0 and repeated appends
    # are quadratic in the number of genes.
    pre_df = []
    for g in self.genes_with_homology_models:
        protein_df = g.protein.df_homology_models.copy().reset_index()
        if not protein_df.empty:
            protein_df['gene'] = g.id
            pre_df.append(protein_df)
    if not pre_df:
        log.warning('Empty dataframe')
        return pd.DataFrame()
    df = pd.concat(pre_df, ignore_index=True)
    return ssbio.utils.clean_df(df.set_index('gene'))
@property
def missing_homology_models(self):
    """list: List of genes with no mapping to any homology models."""
    modeled = self.genes_with_homology_models
    return [gene.id for gene in self.genes if not modeled.has_id(gene.id)]
def set_representative_structure(self, seq_outdir=None, struct_outdir=None, pdb_file_type=None,
                                 engine='needle', always_use_homology=False, rez_cutoff=0.0,
                                 seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
                                 allow_mutants=True, allow_deletions=False,
                                 allow_insertions=False, allow_unresolved=True, skip_large_structures=False,
                                 clean=True, force_rerun=False):
    """Set all representative structure for proteins from a structure in the structures attribute.

    Each gene can have a combination of the following, which will be analyzed to set a representative structure.

        * Homology model(s)
        * Ranked PDBs
        * BLASTed PDBs

    If the ``always_use_homology`` flag is true, homology models are always set as representative when they exist.
    If there are multiple homology models, we rank by the percent sequence coverage.

    Args:
        seq_outdir (str): Path to output directory of sequence alignment files, must be set if GEM-PRO directories
            were not created initially
        struct_outdir (str): Path to output directory of structure files, must be set if GEM-PRO directories
            were not created initially
        pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
        engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
            ``needle`` is the standard EMBOSS tool to run pairwise alignments.
            ``biopython`` is Biopython's implementation of needle. Results can differ!
        always_use_homology (bool): If homology models should always be set as the representative structure
        rez_cutoff (float): Resolution cutoff, in Angstroms (only if experimental structure)
        seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
        allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
            when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
            5 to 95 will be checked for modifications.
        allow_mutants (bool): If mutations should be allowed or checked for
        allow_deletions (bool): If deletions should be allowed or checked for
        allow_insertions (bool): If insertions should be allowed or checked for
        allow_unresolved (bool): If unresolved residues should be allowed or checked for
        skip_large_structures (bool): Default False -- currently, large structures can't be saved as a PDB file even
            if you just want to save a single chain, so Biopython will throw an error when trying to do so. As an
            alternative, if a large structure is selected as representative, the pipeline will currently point to it
            and not clean it. If you don't want this to happen, set this to true.
        clean (bool): If structures should be cleaned
        force_rerun (bool): If sequence to structure alignment should be rerun

    Todo:
        - Remedy large structure representative setting

    """
    for g in tqdm(self.genes):
        # NOTE(review): the per-gene return value is bound but never used;
        # the per-protein call stores the representative structure itself
        repstruct = g.protein.set_representative_structure(seq_outdir=seq_outdir,
                                                           struct_outdir=struct_outdir,
                                                           pdb_file_type=pdb_file_type,
                                                           engine=engine,
                                                           rez_cutoff=rez_cutoff,
                                                           seq_ident_cutoff=seq_ident_cutoff,
                                                           always_use_homology=always_use_homology,
                                                           allow_missing_on_termini=allow_missing_on_termini,
                                                           allow_mutants=allow_mutants,
                                                           allow_deletions=allow_deletions,
                                                           allow_insertions=allow_insertions,
                                                           allow_unresolved=allow_unresolved,
                                                           skip_large_structures=skip_large_structures,
                                                           clean=clean,
                                                           force_rerun=force_rerun)
    log.info('{}/{}: number of genes with a representative structure'.format(len(self.genes_with_a_representative_structure),
                                                                             len(self.genes)))
    log.info('See the "df_representative_structures" attribute for a summary dataframe.')
def set_representative_structure_parallelize(self, sc, seq_outdir=None, struct_outdir=None, pdb_file_type=None,
                                             engine='needle', always_use_homology=False, rez_cutoff=0.0,
                                             seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
                                             allow_mutants=True, allow_deletions=False,
                                             allow_insertions=False, allow_unresolved=True, skip_large_structures=False,
                                             clean=True, force_rerun=False):
    """Set representative structures for all proteins, distributed over a cluster.

    Same behavior and parameters as :meth:`set_representative_structure`, but each gene is
    processed by a worker via ``sc.parallelize(...).map(...).collect()`` and the modified
    genes are merged back into this project afterwards.

    Args:
        sc: Context providing ``parallelize`` (presumably a Spark ``SparkContext`` -- confirm)
        (remaining parameters: see :meth:`set_representative_structure`)

    """
    # Defaulting the inner function's parameters to the outer arguments binds their
    # current values, so the closure ships cleanly to the workers
    def set_repstruct(g, seq_outdir=seq_outdir, struct_outdir=struct_outdir,
                      pdb_file_type=pdb_file_type, engine=engine,
                      rez_cutoff=rez_cutoff, seq_ident_cutoff=seq_ident_cutoff,
                      always_use_homology=always_use_homology,
                      allow_missing_on_termini=allow_missing_on_termini,
                      allow_mutants=allow_mutants, allow_deletions=allow_deletions,
                      allow_insertions=allow_insertions, allow_unresolved=allow_unresolved,
                      skip_large_structures=skip_large_structures,
                      clean=clean, force_rerun=force_rerun):
        g.protein.set_representative_structure(seq_outdir=seq_outdir, struct_outdir=struct_outdir,
                                               pdb_file_type=pdb_file_type, engine=engine,
                                               rez_cutoff=rez_cutoff, seq_ident_cutoff=seq_ident_cutoff,
                                               always_use_homology=always_use_homology,
                                               allow_missing_on_termini=allow_missing_on_termini,
                                               allow_mutants=allow_mutants, allow_deletions=allow_deletions,
                                               allow_insertions=allow_insertions, allow_unresolved=allow_unresolved,
                                               skip_large_structures=skip_large_structures,
                                               clean=clean, force_rerun=force_rerun)
        return g
    genes_rdd = sc.parallelize(self.genes)
    result = genes_rdd.map(set_repstruct).collect()
    # Copy the results over to the GEM-PRO object's genes using the GenePro function "copy_modified_gene"
    # Also count how many genes mapped to KEGG
    for modified_g in result:
        original_gene = self.genes.get_by_id(modified_g.id)
        original_gene.copy_modified_gene(modified_g)
    log.info('{}/{}: number of genes with a representative structure'.format(len(self.genes_with_a_representative_structure),
                                                                             len(self.genes)))
    log.info('See the "df_representative_structures" attribute for a summary dataframe.')
@property
def df_representative_structures(self):
    """DataFrame: Get a dataframe of representative protein structure information."""
    columns = ['gene', 'id', 'is_experimental', 'file_type', 'structure_file']
    records = []
    for gene in self.genes_with_a_representative_structure:
        record = gene.protein.representative_structure.get_dict(df_format=True,
                                                                only_attributes=columns)
        record['gene'] = gene.id
        records.append(record)
    df = pd.DataFrame.from_records(records, columns=columns).set_index('gene')
    if df.empty:
        log.warning('Empty dataframe')
        return df
    return ssbio.utils.clean_df(df)
@property
def missing_representative_structure(self):
    """list: List of genes with no mapping to a representative structure."""
    resolved = self.genes_with_a_representative_structure
    return [gene.id for gene in self.genes if not resolved.has_id(gene.id)]
def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype, create_in_dir=None,
                          execute_from_dir=None, all_genes=False, print_exec=False, **kwargs):
    """Prepare to run I-TASSER homology modeling for genes without structures, or all genes.

    Args:
        itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
        itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
        runtype: How you will be running I-TASSER - local, slurm, or torque
        create_in_dir (str): Local directory where folders will be created, if not provided default is the
            GEM-PRO's ``data_dir``
        execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
            models to another location such as a supercomputer for running
        all_genes (bool): If all genes should be prepped, or only those without any mapped structures
        print_exec (bool): If the execution statement should be printed to run modelling

    Todo:
        * Document kwargs - extra options for I-TASSER, SLURM or Torque execution
        * Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?

    """
    # Resolve (and persist on the project) where the modeling folders will live
    if not create_in_dir:
        if not self.data_dir:
            raise ValueError('Output directory must be specified')
        self.homology_models_dir = op.join(self.data_dir, 'homology_models')
    else:
        self.homology_models_dir = create_in_dir
    ssbio.utils.make_dir(self.homology_models_dir)
    # Execution scripts default to running from the same folder they are created in
    if not execute_from_dir:
        execute_from_dir = self.homology_models_dir
    counter = 0
    for g in self.genes_with_a_representative_sequence:
        repstruct = g.protein.representative_structure
        # Unless all_genes is set, skip genes that already have a representative structure
        if repstruct and not all_genes:
            log.debug('{}: representative structure set, skipping homology modeling'.format(g.id))
            continue
        g.protein.prep_itasser_modeling(itasser_installation=itasser_installation,
                                        itlib_folder=itlib_folder, runtype=runtype,
                                        create_in_dir=self.homology_models_dir,
                                        execute_from_dir=execute_from_dir,
                                        print_exec=print_exec, **kwargs)
        counter += 1
    log.info('Prepared I-TASSER modeling folders for {} genes in folder {}'.format(counter,
                                                                                   self.homology_models_dir))
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
    """Download ALL mapped experimental structures to each protein's structures directory.

    Args:
        outdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is
            desired
        pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
        force_rerun (bool): If files should be re-downloaded if they already exist

    """
    # Fall back to the project-wide file type when none is given
    file_type = pdb_file_type or self.pdb_file_type
    total_saved = 0
    for gene in tqdm(self.genes):
        downloaded = gene.protein.pdb_downloader_and_metadata(outdir=outdir,
                                                              pdb_file_type=file_type,
                                                              force_rerun=force_rerun)
        if downloaded:
            total_saved += len(downloaded)
    log.info('Updated PDB metadata dataframe. See the "df_pdb_metadata" attribute for a summary dataframe.')
    log.info('Saved {} structures total'.format(total_saved))
def download_all_pdbs(self, outdir=None, pdb_file_type=None, load_metadata=False, force_rerun=False):
    """Download all mapped PDB structures for every gene and return the unique set of structures.

    Args:
        outdir (str): Path to output directory, if GEM-PRO directories were not set or another is desired
        pdb_file_type (str): Type of PDB file to download; defaults to the project-wide ``pdb_file_type``
        load_metadata (bool): If metadata should be loaded along with the downloaded files
        force_rerun (bool): If files should be re-downloaded even if they already exist

    Returns:
        list: Deduplicated list of the structures downloaded across all genes
            (order is not preserved by the set conversion)
    """
    if not pdb_file_type:
        pdb_file_type = self.pdb_file_type
    all_structures = []
    for g in tqdm(self.genes):
        pdbs = g.protein.download_all_pdbs(outdir=outdir, pdb_file_type=pdb_file_type,
                                           load_metadata=load_metadata, force_rerun=force_rerun)
        all_structures.extend(pdbs)
    return list(set(all_structures))
@property
def df_pdb_metadata(self):
    """DataFrame: Get a dataframe of PDB metadata (PDBs have to be downloaded first), indexed by gene ID.

    Returns an empty DataFrame (with a warning) if no gene has metadata.
    """
    # Collect per-protein frames and concatenate once at the end:
    # DataFrame.append was removed in pandas 2.0 and repeated appends
    # are quadratic in the number of genes.
    pre_df = []
    for g in self.genes_with_experimental_structures:
        # Get per protein DataFrame
        protein_df = g.protein.df_pdb_metadata.copy().reset_index()
        if not protein_df.empty:
            protein_df['gene'] = g.id
            pre_df.append(protein_df)
    if not pre_df:
        log.warning('Empty dataframe')
        return pd.DataFrame()
    df = pd.concat(pre_df, ignore_index=True)
    return ssbio.utils.clean_df(df.set_index('gene'))
@property
def df_proteins(self):
    """DataFrame: Get a summary dataframe of all proteins in the project."""
    columns = ['gene', 'id', 'sequences', 'num_sequences', 'representative_sequence',
               'repseq_gene_name', 'repseq_uniprot', 'repseq_description',
               'num_structures', 'experimental_structures', 'num_experimental_structures',
               'homology_models', 'num_homology_models',
               'representative_structure', 'representative_chain', 'representative_chain_seq_coverage',
               'repstruct_description', 'repstruct_resolution',
               'num_sequence_alignments', 'num_structure_alignments']
    records = []
    for gene in self.genes:
        # One record of summary statistics per gene
        stats = gene.protein.protein_statistics
        stats['gene'] = gene.id
        records.append(stats)
    df = pd.DataFrame.from_records(records, columns=columns).set_index('gene')
    if df.empty:
        log.warning('Empty dataframe')
        return df
    return ssbio.utils.clean_df(df)
def get_dssp_annotations(self, representatives_only=True, force_rerun=False):
    """Run DSSP on structures and store calculations.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.letter_annotations['*-dssp']``

    Args:
        representatives_only (bool): If analysis should only be run on the representative structure
        force_rerun (bool): If calculations should be rerun even if an output file exists

    """
    for g in tqdm(self.genes):
        g.protein.get_dssp_annotations(representative_only=representatives_only, force_rerun=force_rerun)
def get_dssp_annotations_parallelize(self, sc, representatives_only=True, force_rerun=False):
    """Run DSSP on structures, distributed over workers, and store calculations.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.letter_annotations['*-dssp']``

    Args:
        sc: Context providing ``parallelize`` (e.g. a Spark context)
        representatives_only (bool): If analysis should only be run on the representative structure
        force_rerun (bool): If calculations should be rerun even if an output file exists

    """
    def _annotate(gene):
        # Runs on the workers; the mutated gene is shipped back to the driver
        gene.protein.get_dssp_annotations(representative_only=representatives_only,
                                          force_rerun=force_rerun)
        return gene
    annotated = sc.parallelize(self.genes).map(_annotate).collect()
    # Merge worker results back into this project's gene objects
    for gene in annotated:
        self.genes.get_by_id(gene.id).copy_modified_gene(gene)
def get_msms_annotations(self, representatives_only=True, force_rerun=False):
    """Run MSMS on structures and store calculations.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.letter_annotations['*-msms']``

    Args:
        representatives_only (bool): If analysis should only be run on the representative structure
        force_rerun (bool): If calculations should be rerun even if an output file exists

    """
    for gene in tqdm(self.genes):
        gene.protein.get_msms_annotations(representative_only=representatives_only,
                                          force_rerun=force_rerun)
def get_msms_annotations_parallelize(self, sc, representatives_only=True, force_rerun=False):
    """Run MSMS on structures, distributed over workers, and store calculations.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.letter_annotations['*-msms']``

    Args:
        sc: Context providing ``parallelize`` (e.g. a Spark context)
        representatives_only (bool): If analysis should only be run on the representative structure
        force_rerun (bool): If calculations should be rerun even if an output file exists

    """
    def _annotate(gene):
        # Runs on the workers; the mutated gene is shipped back to the driver
        gene.protein.get_msms_annotations(representative_only=representatives_only,
                                          force_rerun=force_rerun)
        return gene
    annotated = sc.parallelize(self.genes).map(_annotate).collect()
    # Merge worker results back into this project's gene objects
    for gene in annotated:
        self.genes.get_by_id(gene.id).copy_modified_gene(gene)
def get_freesasa_annotations(self, include_hetatms=False, representatives_only=True, force_rerun=False):
    """Run freesasa on structures and store calculations.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.letter_annotations['*-freesasa']``

    Args:
        include_hetatms (bool): If HETATMs should be included in calculations. Defaults to ``False``.
        representatives_only (bool): If analysis should only be run on the representative structure
        force_rerun (bool): If calculations should be rerun even if an output file exists

    """
    for gene in tqdm(self.genes):
        gene.protein.get_freesasa_annotations(include_hetatms=include_hetatms,
                                              representative_only=representatives_only,
                                              force_rerun=force_rerun)
def get_freesasa_annotations_parallelize(self, sc, include_hetatms=False,
                                         representatives_only=True, force_rerun=False):
    """Run freesasa on structures, distributed over workers, and store calculations.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.letter_annotations['*-freesasa']``

    Args:
        sc: Context providing ``parallelize`` (e.g. a Spark context)
        include_hetatms (bool): If HETATMs should be included in calculations. Defaults to ``False``.
        representatives_only (bool): If analysis should only be run on the representative structure
        force_rerun (bool): If calculations should be rerun even if an output file exists

    """
    def _annotate(gene):
        # Runs on the workers; the mutated gene is shipped back to the driver
        gene.protein.get_freesasa_annotations(include_hetatms=include_hetatms,
                                              representative_only=representatives_only,
                                              force_rerun=force_rerun)
        return gene
    annotated = sc.parallelize(self.genes).map(_annotate).collect()
    # Merge worker results back into this project's gene objects
    for gene in annotated:
        self.genes.get_by_id(gene.id).copy_modified_gene(gene)
def get_all_pdbflex_info(self):
    """Retrieve PDBFlex information for every gene with a representative sequence.

    Best-effort: a failure for one gene is skipped so the remaining genes are
    still processed. Logs the number of successful mappings when done.
    """
    counter = 0
    # Silence WARNING-and-below records emitted by the many per-gene lookups
    logging.disable(logging.WARNING)
    try:
        for g in tqdm(self.genes_with_a_representative_sequence):
            try:
                g.protein.get_all_pdbflex_info()
                counter += 1
            except Exception:
                # Deliberate best-effort behavior: skip genes whose lookup fails.
                # (DEBUG output is suppressed by logging.disable above anyway.)
                continue
    finally:
        # Always restore logging, even if iteration itself raises --
        # otherwise all later log output in the process stays suppressed
        logging.disable(logging.NOTSET)
    log.info('{}: successful PDB flex mappings'.format(counter))
def find_disulfide_bridges(self, representatives_only=True):
    """Run Biopython's disulfide bridge finder and store found bridges.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.annotations['SSBOND-biopython']``

    Args:
        representatives_only (bool): If analysis should only be run on the representative structure

    """
    for gene in tqdm(self.genes):
        gene.protein.find_disulfide_bridges(representative_only=representatives_only)
def find_disulfide_bridges_parallelize(self, sc, representatives_only=True):
    """Run Biopython's disulfide bridge finder on workers and store found bridges.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.annotations['SSBOND-biopython']``

    Args:
        sc: Context providing ``parallelize`` (e.g. a Spark context)
        representatives_only (bool): If analysis should only be run on the representative structure

    """
    def _find_bridges(gene):
        # Runs on the workers; the mutated gene is shipped back to the driver.
        # (Renamed from the enclosing method's name to avoid shadowing.)
        gene.protein.find_disulfide_bridges(representative_only=representatives_only)
        return gene
    annotated = sc.parallelize(self.genes).map(_find_bridges).collect()
    # Merge worker results back into this project's gene objects
    for gene in annotated:
        self.genes.get_by_id(gene.id).copy_modified_gene(gene)
### END STRUCTURE RELATED METHODS ###
####################################################################################################################
def __json_encode__(self):
    """Return a dict of this object's plain data attributes for JSON serialization."""
    to_return = {}
    # Don't save properties, methods in the JSON
    # (dir() lists everything; keep only non-dunder names that are neither
    # class-level properties nor callables)
    for x in [a for a in dir(self) if not a.startswith('__') and not isinstance(getattr(type(self), a, None), property) and not callable(getattr(self,a))]:
        # When a COBRA model is attached, its genes are serialized with the
        # model instead of duplicated here
        if self.model and x == 'genes':
            continue
        to_return.update({x: getattr(self, x)})
    return to_return
def __json_decode__(self, **attrs):
    """Restore this object's attributes from a JSON-decoded mapping."""
    for k, v in attrs.items():
        setattr(self, k, v)
    # Rebuild genes as a DictList unless an attached COBRA model supplies them
    if not self.model:
        self.genes = DictList(self.genes)
    else:
        self.genes = self.model.genes
def save_protein_pickles_and_reset_protein(self):
    """Save all Proteins as pickle files -- currently development code for parallelization purposes.

    Also clears the protein attribute in all genes! Pickle paths are recorded in
    ``self.gene_protein_pickles`` keyed by gene ID.
    """
    self.gene_protein_pickles = {}
    for gene in tqdm(self.genes):
        # Only proteins with a representative sequence are worth persisting,
        # but every gene's protein is reset either way
        if gene.protein.representative_sequence:
            pickle_path = op.join(gene.protein.protein_dir, '{}_protein.pckl'.format(gene.id))
            gene.protein.save_pickle(pickle_path)
            self.gene_protein_pickles[gene.id] = pickle_path
        gene.reset_protein()
def load_protein_pickles(self):
log.info('Loading Protein pickles into GEM-PRO...')
for g_id, protein in tqdm(self.gene_protein_pickles.items()):
g = self.genes.get_by_id(g_id)
g.protein = ssbio.io.load_pickle(protein) | {
"content_hash": "f269e92920e1e0fdcdf32d819eec12a3",
"timestamp": "",
"source": "github",
"line_count": 1610,
"max_line_length": 161,
"avg_line_length": 48.55652173913043,
"alnum_prop": 0.5932639173147769,
"repo_name": "nmih/ssbio",
"id": "9e6690574d5f9438123bcd44c45c5bc6b3ba554b",
"size": "78176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ssbio/pipeline/gempro.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3957"
},
{
"name": "Perl",
"bytes": "170187"
},
{
"name": "Python",
"bytes": "2101974"
},
{
"name": "Scheme",
"bytes": "8711"
}
],
"symlink_target": ""
} |
"""
Enumerations related to text in WordprocessingML files
"""
from __future__ import absolute_import, print_function, unicode_literals
class WD_BREAK_TYPE(object):
    """
    Break-type constants corresponding to the WdBreakType enumeration
    http://msdn.microsoft.com/en-us/library/office/ff195905.aspx
    """
    # -- section breaks --
    SECTION_NEXT_PAGE = 2
    SECTION_CONTINUOUS = 3
    SECTION_EVEN_PAGE = 4
    SECTION_ODD_PAGE = 5
    # -- line, page, and column breaks --
    LINE = 6
    PAGE = 7
    COLUMN = 8
    LINE_CLEAR_LEFT = 9
    LINE_CLEAR_RIGHT = 10
    TEXT_WRAPPING = 11
    LINE_CLEAR_ALL = 11  # added for consistency, not in MS version


WD_BREAK = WD_BREAK_TYPE
| {
"content_hash": "a90a45dcf0b84b7d629a018216d78cc5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 24.16,
"alnum_prop": 0.6672185430463576,
"repo_name": "sk1tt1sh/python-docx",
"id": "9260e6fa09feec23a257b97b5591fb20a31c5d77",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docx/enum/text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "551267"
}
],
"symlink_target": ""
} |
# Script entry point: delegates to the project's command-line manager
# (presumably a Flask-Script style Manager -- confirm against the hackru package).
from hackru import manager

if __name__ == '__main__':
    manager.run()
| {
"content_hash": "5681706435fb068b5a16bd721e42b6a6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 26,
"avg_line_length": 18.25,
"alnum_prop": 0.589041095890411,
"repo_name": "HackRU/FlaskRU",
"id": "72fc993c2b95d8cd2336f754bfb2fbbe49658f6e",
"size": "92",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrate.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23188"
},
{
"name": "HTML",
"bytes": "130270"
},
{
"name": "JavaScript",
"bytes": "2302"
},
{
"name": "Python",
"bytes": "16869"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from itertools import chain, izip
import re
from pycparser import c_ast
from decomp import ida, utils
from decomp.c import types as ep_ct
from decomp.cpu import regs
from decomp.cpu.mips import abi, data, insns
# Pairs the address of the next instruction to translate (next_ea) with the
# C fragment(s) generated for the current one (c).
next_ea_and_c = namedtuple('next_ea_and_c', ['next_ea', 'c'])
def reg_strip(arg):
    """Render *arg* as a string with a single leading '$' removed, if present."""
    # generally here the "arg" is a register object whose __str__ will be called
    text = str(arg)
    return text[1:] if text.startswith('$') else text
def fmt_reg(mnem, arg, slot=None):
    '''str -> int -> reg -> opt:slot_types -> str

    Translate a register operand of instruction *mnem* into a C AST node:
    $zero becomes the constant 0; by-reference registers are accessed through
    the ARGS struct; an optional union *slot* selects a typed member.
    '''
    if arg == regs.gpr(0):
        # $zero always reads as the literal constant 0
        return c_ast.Constant('int', '0')
    else:
        # NOTE(review): insn is looked up but not used in this branch
        insn = insns.insns[mnem]
        stripped = reg_strip(arg)
        # XXX FIXME: we store the register gpr/fpr register number, but
        # regs_by_reference is offsets into IDA's register list
        regnum = arg.reg + (abi.fpr_off if type(arg) is regs.fpr else 0)
        if regnum in abi.regs_by_reference:
            # refer to argument via ARGS->
            r = c_ast.StructRef(c_ast.ID(utils.args_tag), '->',
                                c_ast.ID(stripped))
        else:
            r = c_ast.ID(stripped)
        if slot is not None and slot != ep_ct.slot_types._:
            # use a union slot
            return c_ast.StructRef(r, '.', c_ast.ID(slot.name))
        else:
            return r
def fmt_op(arg, mnem, op=None):
    '''op_ty -> str -> opt:int -> c_ast()

    Translate one IDA operand of instruction *mnem* into a C AST node.
    *op* is the operand's position in the instruction, used to select the
    union slot for register operands.
    '''
    insn = insns.insns[mnem]
    def addrof_or_deref(arg):
        # since addiu cannot touch memory, it must be calculating an address
        if mnem == 'addiu' or (insn.ty == insns.types.usefn and
                               insn.subst == 'memcpy'):
            return c_ast.UnaryOp('&', arg)
        else:
            return arg
    if arg.ty == ida.op_ty.reg:
        # Pick the union slot declared for this operand position, if any
        try:
            slot = insn.slots[op]
        except IndexError:
            slot = None
        reg = fmt_reg(mnem, arg.val, slot)
        # memcpy-style helpers take the register's address, not its value
        if insn.ty == insns.types.usefn and insn.subst == 'memcpy':
            return c_ast.UnaryOp('&', reg)
        else:
            return reg
    if mnem == 'la':
        # XXX the "name" type is neither suitable for a decompiler nor a
        # braindead static translator such as this one. i.e., in order to
        # translate to C, we have no choice but to deal with C's type system,
        # because things like "la ptr" should be translated as "reg = ptr", but
        # "la not_ptr" should be translated as "reg = &not_ptr"
        if arg.ty in [ida.op_ty.array, ida.op_ty.ptr]:
            return c_ast.ID(arg.val)
        elif arg.ty == ida.op_ty.name:
            return c_ast.UnaryOp('&', c_ast.ID(arg.val))
        else:  # an address
            return c_ast.Constant('int', str(arg.val))
    elif arg.ty == ida.op_ty.array:
        (idx, rem) = ida.item_off(arg.target)
        arr = c_ast.ArrayRef(c_ast.ID(arg.val), c_ast.Constant('int',
                                                               str(idx)))
        # NOTE: if strange pointer math with a nonzero `rem` ever appears,
        # emitting `addrof_or_deref(arr) + rem` produces a nonsense lvalue and
        # would need special handling
        return addrof_or_deref(arr)
    elif arg.ty == ida.op_ty.ptr:
        # dereferencing of pointers is handled by the "displ" case, so just
        # return an address here too (rem intentionally discarded, see above)
        (_, rem) = ida.item_off(arg.target)
        return c_ast.ID(arg.val)
    elif arg.ty == ida.op_ty.name:
        nameval = c_ast.ID(arg.val)
        return addrof_or_deref(nameval)
    elif arg.ty == ida.op_ty.displ:
        # base register + displacement: cast to a typed pointer and dereference
        r = fmt_reg(mnem, arg.val.reg, ep_ct.slot_types.u32)
        off = c_ast.BinaryOp(
            '+', r, c_ast.Constant('int', str(arg.val.displ)))
        # BUGFIX: originally tested the bare enum value `insns.types.usefn`
        # (a constant truth value) instead of comparing it against insn.ty,
        # unlike every other memcpy check in this function
        tyns = ['char' if insn.ty == insns.types.usefn and insn.subst == 'memcpy'
                else insn.subst]
        cast = ep_ct.simple_cast(ep_ct.ptr(ep_ct.simple_typename(tyns)), off)
        if insn.ty == insns.types.usefn and insn.subst == 'memcpy':
            # memcpy wants the pointer itself, not the pointed-to value
            return cast
        else:
            return c_ast.UnaryOp('*', cast)
    else:
        # immediates and anything else: emit as an integer constant
        return c_ast.Constant('int', str(arg.val))
def do_fbranch(**kw):
    '''Emit a floating-point conditional branch: if ({subst}) <jump>.'''
    return c_ast.If(c_ast.ID(kw['subst']), ep_ct.do_jump(**kw), None)
def do_fcmp(**kw):
    '''Emit a floating-point compare:
    if ({rs} {subst} {rt}) fp_cond = 1; else fp_cond = 0
    '''
    def set_fp_cond(value):
        # assignment of a 0/1 constant to the synthetic fp_cond flag
        return ep_ct.do_assign(
            rt=c_ast.ID('%sfp_cond' % utils.decomp_tag),
            op=c_ast.Constant('int', value))
    fragment = ep_ct.do_branch(**kw)
    fragment.iftrue = set_fp_cond('1')
    fragment.iffalse = set_fp_cond('0')
    return fragment
def do_slt(**kw):
    """Build: if (({subst}){rs} < ({subst}){rt}) {rd} = 1; else {rd} = 0;"""
    def typed_cast(operand):
        # cast both comparands to the signedness named by the subst field
        return ep_ct.simple_cast(ep_ct.simple_typename([kw['subst']]),
                                 operand)
    def flag_assign(value):
        return ep_ct.do_assign(rt=kw['rd'],
                               op=c_ast.Constant('int', value))
    branch = ep_ct.do_branch(subst='<', rs=typed_cast(kw['rs']),
                             rt=typed_cast(kw['rt']))
    branch.iftrue = flag_assign('1')
    branch.iffalse = flag_assign('0')
    return branch
def do_store(**kw):
    """Build: {op} = {rt} -- a store writes the register into the memory lvalue."""
    lvalue = kw['op']
    rvalue = kw['rt']
    return ep_ct.do_assign(rt=lvalue, op=rvalue)
def do_lui(**kw):
    """Build: {rt} = (dest type)({op} << 16) for "load upper immediate"."""
    shifted = c_ast.BinaryOp('<<', kw['op'], c_ast.Constant('int', '16'))
    dest_ty = ep_ct.slot_to_typename[kw['result']]
    return ep_ct.do_assign(rt=kw['rt'],
                           op=ep_ct.simple_cast(dest_ty, shifted))
def create_insn_to_c_table(tbl):
    '''{str : mips_insn} -> {str : fun}'''
    # these instruction types are handled by dedicated code paths in
    # fmt_insn, so they get no generic formatter entry
    special_cased = [insns.types.usefn, insns.types.call, insns.types.jr]
    formatters = {
        insns.types.op : ep_ct.do_op,
        insns.types.jump : ep_ct.do_jump,
        insns.types.fbranch : do_fbranch,
        # as branch likely complicates things, "branch" is a partial template
        insns.types.branch : ep_ct.do_branch, #'if ({rs} {subst} {rt})',
        insns.types.fcmp : do_fcmp,
        insns.types.load : ep_ct.do_assign,
        insns.types.store : do_store,
        insns.types.nop : ep_ct.do_nop,
        insns.types.slt : do_slt,
        insns.types.la : ep_ct.do_assign,
        insns.types.li : ep_ct.do_assign,
        insns.types.lui : do_lui
    }
    return {entry.insn : formatters[entry.ty] for entry in tbl.itervalues()
            if entry.ty not in special_cased}
# module-level mnemonic -> formatter table consulted by get_formatter()
insns_c = create_insn_to_c_table(insns.insns)
def extern_call(callee, sig, mnem, ea):
    '''str -> fn_sig -> str -> ea_t -> c_ast'''
    # generate an ast node for a call to an external function
    def fmt_reg_for_call(reg, slot, node):
        '''reg -> slot_ty -> c_ast -> c_ast'''
        # cast the register expression to the declared parameter type so the
        # emitted call matches the extern prototype
        reg_ast = fmt_reg(mnem, reg, slot)
        return ep_ct.simple_cast(node, reg_ast)
    if sig.arg_regs == []:
        # no declared argument registers -> emit a no-argument call
        fn_args = None
    else:
        # NOTE(review): this compares with `is` against the class object, so
        # it presumably expects the sentinel to be c_ast.EllipsisParam itself
        # rather than an instance -- confirm against how fn_sig is built
        if sig.arg_regs[-1] is c_ast.EllipsisParam:
            # variadic callee: drop the ellipsis marker (utils.init) and ask
            # the ABI layer for the concrete extra arguments at this call site
            va_arg = data.get_arg_for_va_function(callee, ea)
            params = (list(utils.init(sig.arg_regs))
                      + abi.get_args_for_va_function(callee, va_arg))
        else:
            params = sig.arg_regs
        fn_args = ep_ct.args_for_call(
            list(fmt_reg_for_call(reg, slot, node)
                 for (reg, slot, node) in params))
    if sig.return_type is None:
        # void return: plain call statement
        return ep_ct.make_call(callee, args=fn_args)
    else:
        # non-void: assign the result into the ABI return register
        (reg, slot, rtype) = sig.return_type
        ret_reg = fmt_reg(mnem, reg, slot)
        return ep_ct.make_call(callee, ret_reg=ret_reg, args=fn_args,
                               for_extern=rtype)
def do_switch_or_return(ea):
    """Translate a jr at ea into either a C return or a switch over an
    IDA-recognized jump table."""
    if ida.is_ret_insn(ea):
        return c_ast.Return(None)
    # not a plain return, so IDA must know this jr as a switch idiom
    try:
        sw = ida.switch_cases(ea)
    except ida.NoSwitchError:
        raise utils.BugError('unhandled jr at ea %s' % ida.atoa(ea))
    default_stmt = [c_ast.Default([c_ast.Goto(ida.name(sw.default))])]
    case_stmts = [c_ast.Case(c_ast.Constant('int', str(addr)),
                             [c_ast.Goto(ida.name(loc))])
                  for (addr, loc) in sw.cases.iteritems()]
    (mnem, opnd, opn) = data.get_swval(ea)
    switch_value = fmt_op(opnd, mnem, opn)
    return c_ast.Switch(switch_value,
                        c_ast.Compound(case_stmts + default_stmt))
def get_formatter(mnem):
    '''str -> fn'''
    # table values are always callables, so None safely signals "missing"
    formatter = insns_c.get(mnem)
    if formatter is None:
        raise utils.BugError("%s: couldn't find formatter" % mnem)
    return formatter
def make_args_for_formatter(insn, vals):
    '''mips_insn -> <dyn> -> [<dyn>]'''
    # takes list of values from fmt_op for the formatter's consumption
    #
    # maps each instruction type to a small adapter that renames the
    # positional fmt_op results into the keyword args its formatter expects
    def simple_2op(insn, opvals):
        # cast the rvalue to the lvalue's type to avoid a warning for la
        # XXX do lw too?
        #
        # XXX we should also handle sw here, but op_ty doesn't carry enough
        # information to be able to do this. see decomp.ida for why op_ty
        # sucks. we need to be able to cast to the type of the lvalue, but
        # op_ty's knowledge of types isn't expressive enough
        op = (ep_ct.cast_to_dest_reg(insn, opvals[1])
              if insn.insn == 'la'
              else opvals[1])
        return dict(izip(['result', 'rt', 'op'],
                         [insn.result, opvals[0], op]))
    def subst_rs_rt(insn, opvals):
        # compare-against-zero variants encode the zero implicitly
        rt = (c_ast.Constant('int', '0')
              if insns.subtypes.zero in insn.subty # for beqz, bgez, etc.
              else opvals[1])
        return dict(izip(['subst', 'rs', 'rt'],
                         [insn.subst, opvals[0], rt]))
    subst_3op = lambda insn, opvals: dict(
        izip(['subst', 'result', 'rd', 'rs', 'rt'],
             [insn.subst, insn.result] + opvals))
    sw = {
        insns.types.op : subst_3op,
        insns.types.jump : lambda insn, opvals: dict(
            izip(['loc'], opvals)),
        insns.types.fbranch : lambda insn, opvals: dict(
            izip(['subst', 'loc'],
                 [insn.subst] + opvals)),
        insns.types.branch : subst_rs_rt,
        insns.types.fcmp : subst_rs_rt,
        insns.types.load : simple_2op,
        insns.types.store : simple_2op,
        insns.types.slt : subst_3op,
        insns.types.la : simple_2op,
        insns.types.li : simple_2op,
        insns.types.lui : simple_2op,
        insns.types.call : lambda insn, opvals: dict(
            izip(['rs'], opvals))
    }
    # NOTE(review): this also swallows a KeyError raised *inside* an adapter,
    # not only an unknown insn.ty -- confirm that is intentional
    try:
        return sw[insn.ty](insn, vals)
    except KeyError:
        return {}
def fmt_insn(ea, our_fns, extern_reg_map, stkvars, from_delay):
    '''ea_t -> frozenset(str) -> {str : reg_sig} -> {int : c_ast()} ->
    (ea_t, str)'''
    # Translate the instruction at ea into C AST statements, returning the
    # next address to process plus the statement list (via next_ea_and_c).
    # When the insn has a delay slot, the slot insn is folded in here too
    # (fmt_insn recurses once with from_delay=True).
    #
    # XXX this function is too long and its interaction with the formatter steps
    # is not very clear
    # NOTE mutation in a few places
    #
    # we cannot rely simply on IDA's disassembly when generating C. e.g.:
    #
    # .text:100052F4 lwc1 $f12, (square - 0x10008E50)($s1)
    #
    # THIS means f12 = square[0] (square is declared as an array). but...
    #
    # .text:100041A4 lw $a1, (seqList - 0x1000BF78)($a1)
    #
    # THIS means a1 = seqList--NOT *seqList or seqList[0]. GetOperand and
    # similar functions are thus useless for our purposes. unfortunately, we
    # have no choice but to handle C's type system in order to emit C from
    # disassembly. we don't COMPLETELY handle it (patches welcome!!!1), but we
    # do achieve enough that with minor database annotations we have a POC in
    # our chosen target.
    def labelize(ea, stmt):
        # attach the IDA label for ea (if any) so emitted gotos can target this
        # statement; suppressed while formatting a delay slot, since the label
        # is emitted separately at the delay insn's real position
        if from_delay is False:
            label = ida.name(ea)
            if label != '':
                return c_ast.Label(label, stmt)
            else:
                return stmt
        else:
            return stmt
    fn = ida.get_func(ea)
    # NOTE(review): fn_name appears unused in this function
    fn_name = ida.get_func_name(ea)
    fn_end = fn.endEA
    mnem = ida.get_mnem(ea)
    insn = insns.insns[mnem]
    is_delayed = mnem in insns.delayed
    # for a delayed insn, delay_ea is the slot insn and next_ea skips past it
    delay_ea = ida.next_head(ea, fn_end)
    next_ea = (delay_ea
               if is_delayed is False
               else ida.next_head(delay_ea, fn_end))
    if ida.is_switch_insn(ea) is True:
        # don't emit stmts that IDA marks as being part of a switch idiom
        #
        # pass delay_ea as the next ea to check, because we may have a case in
        # which a non-switch insn follows a delayed switch insn
        return next_ea_and_c(delay_ea, [labelize(ea, c_ast.EmptyStatement())])
    opvals = ida.get_opvals(ea, stkvars)
    # addiu has many forms, some of which require transformation into
    # two-operand statements, others which need to be kept as three-operand
    # statements, so we have to handle it here, not fmt_op
    #
    # we can elide a previous modification to that register within a basic
    # block if it has no uses between a modification and the addiu, though we
    # don't yet do this
    if mnem == 'addiu' and opvals[-1].ty != ida.op_ty.value:
        # handles cases where addiu is effectively an assignment (e.g. when
        # used for address calculation)
        # first op is always a register
        reg = fmt_op(opvals[0], mnem, 0)
        # any non-number as the final operand should be handled according to
        # fmt_op's usual rules
        arg = fmt_op(opvals[-1], mnem)
        assign = labelize(
            ea,
            ep_ct.do_assign(rt=reg, op=ep_ct.cast_to_dest_reg(insn, arg)))
        return next_ea_and_c(next_ea, [assign])
    if mnem == 'trunc.w.d':
        # emulate trunc.w.d with our function
        vals = [fmt_reg(mnem, opvals[0].val, insn.result),
                fmt_reg(mnem, opvals[1].val, insn.slots[0])]
        return next_ea_and_c(
            next_ea,
            [labelize(
                ea,
                ep_ct.make_call(insn.subst, ret_reg=vals[0],
                                args=ep_ct.args_for_call([vals[1]])))])
    elif mnem in ['jalr', 'jr']:
        # jalr and jr need special handling
        vals = []
    else:
        # generic path: format every operand up front
        vals = list(fmt_op(x, mnem, op) for (op, x) in enumerate(opvals))
    if insn.ty == insns.types.usefn:
        if insn.subst == 'memcpy':
            # this should be redesigned to not use memcpy just to make the
            # generated code a little nicer, but the complexity hasn't been
            # worth it. the issue is: the fact that [ls][dw]c1 move data
            # between the fpu and memory is no guarantee that the data held in
            # an fpu register is actually a float or a double, which complicates
            # the logic a little bit. fortunately, we can just use memcpy
            # instead, and modern compilers will inline it so that it's
            # equivalent to a load/store for small sizes.
            if mnem in ['ldc1', 'sdc1']:
                size = 8
            elif mnem in ['lwc1', 'swc1']:
                size = 4
            else:
                raise utils.BugError('unhandled usefn insn %s' % mnem)
            # need to swap the order of arguments for a store, since loads and
            # stores are written in the same direction, but they aren't in C!
            args = list(reversed(vals) if mnem.startswith('s') else vals)
            return next_ea_and_c(
                next_ea,
                [labelize(
                    ea,
                    ep_ct.make_call(
                        insn.subst, args=ep_ct.args_for_call(
                            [args[0],
                             args[1],
                             c_ast.Constant('int', str(size))])))])
        else:
            raise utils.BugError('unhandled usefn instruction %s' % mnem)
    else:
        args = make_args_for_formatter(insn, vals)
    if is_delayed is True:
        # format our delayed instruction before processing this instruction--but
        # see below for an important note about the case of branch likely
        (_, delay_slot) = fmt_insn(delay_ea, our_fns, extern_reg_map, stkvars,
                                   from_delay=True)
        # branch target
        loc = opvals[-1].val
        if mnem == 'jr':
            delayed = do_switch_or_return(ea)
        elif insn.ty == insns.types.call:
            callee = data.get_callee(ea, mnem, args)
            if callee in our_fns:
                delayed = ep_ct.internal_call(callee)
            else: # external function call
                try:
                    sig = extern_reg_map[callee]
                except KeyError:
                    # XXX we should really modify pycparser to allow insertion
                    # of comments, as it would make the emitted code much easier
                    # to follow. just alert the user that we couldn't make some
                    # calls for now
                    print ('/* %s: no regmap info, emitting empty stmt at %s */'
                           % (callee, ida.atoa(ea)))
                    delayed = ep_ct.do_nop()
                else:
                    delayed = extern_call(callee, sig, mnem, ea)
        else: # some other sort of delayed insn
            delayed = get_formatter(mnem)(**args)
        goto = c_ast.Goto(loc)
        if insns.subtypes.likely in insn.subty:
            # for branch likely, the delay slot is NOT executed if the branch is
            # not taken
            # NOTE(review): the labeled copy after the unconditional goto is
            # unreachable by fallthrough; presumably it exists only so the
            # delay-slot label stays a valid jump target -- confirm
            delayed.iftrue = c_ast.Compound(delay_slot +
                                            [goto,
                                             labelize(delay_ea, delay_slot[0])])
            ret = labelize(ea, delayed)
        else:
            if insn.ty in [insns.types.branch, insns.types.fbranch]:
                # ordinary branch: the slot insn runs on BOTH paths, so it is
                # duplicated into iftrue (before the goto) and iffalse
                delayed.iftrue = c_ast.Compound(delay_slot + [goto])
                delayed.iffalse = labelize(delay_ea, delay_slot[0])
                ret = labelize(ea, delayed)
            else:
                # non-branch delayed insn (e.g. a call): slot executes first
                ret = labelize(ea,
                               c_ast.Compound([labelize(delay_ea, delay_slot[0]),
                                               delayed]))
        return next_ea_and_c(next_ea, [ret])
    return next_ea_and_c(next_ea, [labelize(ea, get_formatter(mnem)(**args))])
| {
"content_hash": "8e299e669579de691dd1627c3a85812b",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 81,
"avg_line_length": 39.98030634573304,
"alnum_prop": 0.5423348475726562,
"repo_name": "drvink/epanos",
"id": "05373604621f5b89d3cc2e02a5da9ffdc73ea9e8",
"size": "18271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "decomp/cpu/mips/gen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "62262"
},
{
"name": "C",
"bytes": "198157"
},
{
"name": "Perl",
"bytes": "408"
},
{
"name": "Python",
"bytes": "84416"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
class WebConfig(AppConfig):
    """Django application configuration for the ``web`` app."""
    # label/path under which Django's app registry knows this application
    name = 'web'
| {
"content_hash": "881626e1ccb1066e93be5b12902377ee",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 18.428571428571427,
"alnum_prop": 0.6976744186046512,
"repo_name": "ieiayaobb/lushi8",
"id": "9d7d5b4b6d66a5fcbfab3d7b538cce0be1f0df8c",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/lean",
"path": "web/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33768"
},
{
"name": "HTML",
"bytes": "4822"
},
{
"name": "JavaScript",
"bytes": "17133"
},
{
"name": "Python",
"bytes": "28950"
}
],
"symlink_target": ""
} |
"""
=====================================================
Convert a 3-color image (JPG) to separate FITS images
=====================================================
This example opens an RGB JPEG image and writes out each channel as a separate
FITS (image) file.
This example uses `pillow <http://python-pillow.org>`_ to read the image,
`matplotlib.pyplot` to display the image, and `astropy.io.fits` to save FITS files.
-------------------
*By: Erik Bray, Adrian Price-Whelan*
*License: BSD*
-------------------
"""
import numpy as np
from PIL import Image
from astropy.io import fits
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Load and display the original 3-color jpeg image:
image = Image.open('Hs-2009-14-a-web.jpg')
xsize, ysize = image.size
print("Image size: {} x {}".format(xsize, ysize))
plt.imshow(image)
##############################################################################
# Split the three channels (RGB) and get the data as Numpy arrays. The arrays
# are flattened, so they are 1-dimensional:
r, g, b = image.split()
r_data = np.array(r.getdata()) # data is now an array of length ysize*xsize
g_data = np.array(g.getdata())
b_data = np.array(b.getdata())
print(r_data.shape)
##############################################################################
# Reshape the image arrays to be 2-dimensional:
r_data = r_data.reshape(ysize, xsize)
g_data = g_data.reshape(ysize, xsize)
b_data = b_data.reshape(ysize, xsize)
##############################################################################
# Write out the channels as separate FITS images
red = fits.PrimaryHDU(data=r_data)
red.header['LATOBS'] = "32:11:56" # add spurious header info
red.header['LONGOBS'] = "110:56"
red.writeto('red.fits')
green = fits.PrimaryHDU(data=g_data)
green.header['LATOBS'] = "32:11:56"
green.header['LONGOBS'] = "110:56"
green.writeto('green.fits')
blue = fits.PrimaryHDU(data=b_data)
blue.header['LATOBS'] = "32:11:56"
blue.header['LONGOBS'] = "110:56"
blue.writeto('blue.fits')
##############################################################################
# Delete the files created
import os
os.remove('red.fits')
os.remove('green.fits')
os.remove('blue.fits')
| {
"content_hash": "c0543ae0495191cf3e85d3cc5b9424a4",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 83,
"avg_line_length": 30.71604938271605,
"alnum_prop": 0.5409967845659164,
"repo_name": "kelle/astropy",
"id": "c3bb5d0819dc2c5014dc311d705dbbacfa2333df",
"size": "2512",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "examples/io/split-jpeg-to-fits.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366877"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "8072264"
},
{
"name": "Shell",
"bytes": "446"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
} |
import inspect
import logging
import types
from concurrent import futures
import six
from taskflow.engines.worker_based import endpoint
from taskflow.engines.worker_based import server
from taskflow.openstack.common import importutils
from taskflow import task as t_task
from taskflow.utils import reflection
from taskflow.utils import threading_utils as tu
LOG = logging.getLogger(__name__)
class Worker(object):
    """Worker that can be started on a remote host for handling tasks requests.

    :param url: broker url
    :param exchange: broker exchange name
    :param topic: topic name under which worker is stated
    :param tasks: tasks list that worker is capable to perform

        Tasks list item can be one of the following types:

        1. String:

            1.1 Python module name:

                > tasks=['taskflow.tests.utils']

            1.2. Task class (BaseTask subclass) name:

                > tasks=['taskflow.test.utils.DummyTask']

        3. Python module:

            > from taskflow.tests import utils
            > tasks=[utils]

        4. Task class (BaseTask subclass):

            > from taskflow.tests import utils
            > tasks=[utils.DummyTask]

    :param executor: custom executor object that is used for processing
                     requests in separate threads
    :keyword threads_count: threads count to be passed to the default executor
    :keyword transport: broker transport to be used (e.g. amqp, memory, etc.)
    :keyword transport_options: broker transport options
    """

    def __init__(self, exchange, topic, tasks, executor=None, **kwargs):
        self._topic = topic
        self._executor = executor
        self._threads_count = kwargs.pop('threads_count',
                                         tu.get_optimal_thread_count())
        if self._executor is None:
            self._executor = futures.ThreadPoolExecutor(self._threads_count)
        self._endpoints = self._derive_endpoints(tasks)
        self._server = server.Server(topic, exchange, self._executor,
                                     self._endpoints, **kwargs)

    @staticmethod
    def _derive_endpoints(tasks):
        """Derive endpoints from list of strings, classes or packages."""
        derived_tasks = set()
        for item in tasks:
            module = None
            if isinstance(item, six.string_types):
                try:
                    # "pkg.mod:Class" style reference
                    pkg, cls = item.split(':')
                except ValueError:
                    # plain module name: scan it for BaseTask subclasses below
                    module = importutils.import_module(item)
                else:
                    obj = importutils.import_class('%s.%s' % (pkg, cls))
                    if not reflection.is_subclass(obj, t_task.BaseTask):
                        raise TypeError("Item %s is not a BaseTask subclass" %
                                        item)
                    derived_tasks.add(obj)
            elif isinstance(item, types.ModuleType):
                module = item
            elif reflection.is_subclass(item, t_task.BaseTask):
                derived_tasks.add(item)
            else:
                raise TypeError("Item %s unexpected type: %s" %
                                (item, type(item)))
            # derive tasks from the module's members
            if module is not None:
                for name, obj in inspect.getmembers(module):
                    if reflection.is_subclass(obj, t_task.BaseTask):
                        derived_tasks.add(obj)
        return [endpoint.Endpoint(task) for task in derived_tasks]

    def run(self):
        """Run worker."""
        # use lazy %-style arguments so logging only formats when enabled
        LOG.info("Starting the '%s' topic worker in %s threads.",
                 self._topic, self._threads_count)
        LOG.info("Tasks list:")
        # NOTE: loop variable renamed so it does not shadow the imported
        # `endpoint` module used by _derive_endpoints
        for task_endpoint in self._endpoints:
            LOG.info("|-- %s", task_endpoint)
        self._server.start()

    def wait(self):
        """Wait until worker is started."""
        self._server.wait()

    def stop(self):
        """Stop worker."""
        self._server.stop()
| {
"content_hash": "ccea5ae10f7ccef8a0546d91daf95196",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 34.85087719298246,
"alnum_prop": 0.5779008306065945,
"repo_name": "varunarya10/taskflow",
"id": "609339de80e6c1f47c6a0c3aaf3d1139d7c1b229",
"size": "4630",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "taskflow/engines/worker_based/worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "734087"
},
{
"name": "Shell",
"bytes": "1988"
}
],
"symlink_target": ""
} |
import io
import os
import tarfile
import zipfile
from .distribution import Distribution
class SDist(Distribution):
    """Distribution metadata extracted from a source archive
    (.zip, .tar.gz or .tar.bz2)."""

    def __init__(self, filename, metadata_version=None):
        self.filename = filename
        self.metadata_version = metadata_version
        self.extractMetadata()

    @classmethod
    def _get_archive(cls, fqn):
        # Open ``fqn`` and return (archive, member_names, reader_callable).
        if not os.path.exists(fqn):
            raise ValueError('No such file: %s' % fqn)
        if fqn.endswith('.zip'):
            archive = zipfile.ZipFile(fqn)
            names = archive.namelist()
            read_file = archive.read
        elif fqn.endswith('gz') or fqn.endswith('bz2'):
            archive = tarfile.TarFile.open(fqn)
            names = archive.getnames()
            def read_file(name):
                return archive.extractfile(name).read()
        else:
            raise ValueError('Not a known archive format: %s' % fqn)
        return archive, names, read_file

    def read(self):
        """Return the bytes of the archive's PKG-INFO member."""
        fqn = os.path.abspath(os.path.normpath(self.filename))
        archive, names, read_file = self._get_archive(fqn)
        try:
            # Schwartzian transform: try the shallowest PKG-INFO first.
            split_paths = [name.split('/') for name in names
                           if 'PKG-INFO' in name]
            by_depth = sorted([(len(parts), parts) for parts in split_paths])
            for (_, parts) in by_depth:
                candidate = '/'.join(parts)
                data = read_file(candidate)
                if b'Metadata-Version' in data:
                    return data
        finally:
            archive.close()
        raise ValueError('No PKG-INFO in archive: %s' % fqn)
class UnpackedSDist(SDist):
    """Metadata for a source distribution already unpacked to a directory."""

    def __init__(self, filename, metadata_version=None):
        # Accept either the unpacked directory itself or any file inside it.
        if os.path.isfile(filename):
            filename = os.path.dirname(filename)
        elif not os.path.isdir(filename):
            raise ValueError('No such file: %s' % filename)
        super(UnpackedSDist, self).__init__(
            filename, metadata_version=metadata_version)

    def read(self):
        """Return the text of <dir>/PKG-INFO, wrapping any failure."""
        try:
            pkg_info = os.path.join(self.filename, 'PKG-INFO')
            with io.open(pkg_info, errors='ignore') as f:
                return f.read()
        except Exception as e:
            raise ValueError('Could not load %s as an unpacked sdist: %s'
                             % (self.filename, e))
| {
"content_hash": "8af83a5489599af99137037bc1b5778b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 73,
"avg_line_length": 31.493333333333332,
"alnum_prop": 0.5520745131244708,
"repo_name": "sonntagsgesicht/regtest",
"id": "33d7844faff23da1511c1efc72dc2e3abd0dd632",
"size": "2362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".aux/venv/lib/python3.9/site-packages/pkginfo/sdist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13888"
}
],
"symlink_target": ""
} |
"""Tests for grr.parsers.osx_file_parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
from absl import app
import biplist
from grr_response_core.lib import parsers
from grr_response_core.lib.parsers import osx_file_parser
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr.test_lib import test_lib
class TestOSXFileParsing(test_lib.GRRBaseTest):
  """Test parsing of OSX files."""

  def testOSXUsersParser(self):
    """Ensure we can extract users from a passwd file."""
    paths = ["/Users/user1", "/Users/user2", "/Users/Shared"]
    statentries = []
    for path in paths:
      statentries.append(
          rdf_client_fs.StatEntry(
              pathspec=rdf_paths.PathSpec(
                  path=path, pathtype=rdf_paths.PathSpec.PathType.OS),
              st_mode=16877))
    # A regular (non-directory) entry that must not be reported as a user.
    statentries.append(
        rdf_client_fs.StatEntry(
            pathspec=rdf_paths.PathSpec(
                path="/Users/.localized",
                pathtype=rdf_paths.PathSpec.PathType.OS),
            st_mode=33261))
    parser = osx_file_parser.OSXUsersParser()
    out = list(parser.ParseMultiple(statentries, None))
    self.assertCountEqual([x.username for x in out], ["user1", "user2"])
    self.assertCountEqual([x.homedir for x in out],
                          ["/Users/user1", "/Users/user2"])

  def testOSXSPHardwareDataTypeParserInvalidInput(self):
    """Non-plist system_profiler output must raise a wrapped plist error."""
    parser = osx_file_parser.OSXSPHardwareDataTypeParser()
    response = rdf_client_action.ExecuteResponse()
    response.request.cmd = "/usr/sbin/system_profiler"
    response.request.args = ["-xml", "SPHardwareDataType"]
    # NOTE: the original test assigned non-plist UTF-8 text here and then
    # immediately overwrote it; empty output is the effective invalid input,
    # so the dead store has been removed.
    response.stdout = b""
    response.exit_status = 0
    with self.assertRaises(parsers.ParseError) as context:
      list(parser.ParseResponse(None, response, rdf_paths.PathSpec.PathType.OS))
    exception = context.exception
    self.assertIsInstance(exception.cause, biplist.InvalidPlistException)

  def testOSXSPHardwareDataTypeParser(self):
    parser = osx_file_parser.OSXSPHardwareDataTypeParser()
    # Use a context manager so the fixture file handle is closed.
    with open(os.path.join(self.base_path, "system_profiler.xml"),
              "rb") as fd:
      content = fd.read()
    result = list(
        parser.Parse("/usr/sbin/system_profiler", ["SPHardwareDataType -xml"],
                     content, "", 0, None))
    self.assertEqual(result[0].serial_number, "C02JQ0F5F6L9")
    self.assertEqual(result[0].bios_version, "MBP101.00EE.B02")
    self.assertEqual(result[0].system_product_name, "MacBookPro10,1")

  def testOSXLaunchdPlistParser(self):
    parser = osx_file_parser.OSXLaunchdPlistParser()
    # Same plist in XML and binary form; both must parse identically.
    plists = ["com.google.code.grr.plist", "com.google.code.grr.bplist"]
    results = []
    for plist in plists:
      path = os.path.join(self.base_path, "parser_test", plist)
      pathspec = rdf_paths.PathSpec.OS(path=path)
      # Close the fixture file instead of leaking the handle.
      with open(path, "rb") as plist_file:
        results.extend(list(parser.ParseFile(None, pathspec, plist_file)))
    for result in results:
      self.assertEqual(result.Label, "com.google.code.grr")
      self.assertCountEqual(result.ProgramArguments, [
          "/usr/lib/grr/grr_3.0.0.5_amd64/grr",
          "--config=/usr/lib/grr/grr_3.0.0.5_amd64/grr.yaml"
      ])

  def testOSXInstallHistoryPlistParserInvalidInput(self):
    parser = osx_file_parser.OSXInstallHistoryPlistParser()
    pathspec = rdf_paths.PathSpec.OS(path=os.path.join("foo", "bar", "baz"))
    contents = io.BytesIO("zażółć gęślą jaźń".encode("utf-8"))
    with self.assertRaises(parsers.ParseError) as context:
      list(parser.ParseFile(None, pathspec, contents))
    exception = context.exception
    self.assertIsInstance(exception.cause, biplist.InvalidPlistException)

  def testOSXInstallHistoryPlistParser(self):
    parser = osx_file_parser.OSXInstallHistoryPlistParser()
    path = os.path.join(self.base_path, "parser_test", "InstallHistory.plist")
    pathspec = rdf_paths.PathSpec.OS(path=path)
    with io.open(path, "rb") as plist_file:
      results = list(parser.ParseFile(None, pathspec, plist_file))
    self.assertLen(results, 1)
    self.assertIsInstance(results[0], rdf_client.SoftwarePackages)
    packages = results[0].packages
    # ESET AV
    self.assertEqual(packages[0].name, "ESET NOD32 Antivirus")
    self.assertEqual(packages[0].version, "")
    self.assertEqual(
        packages[0].description,
        "com.eset.esetNod32Antivirus.ESETNOD32Antivirus.pkg,"
        "com.eset.esetNod32Antivirus.GUI_startup.pkg,"
        "com.eset.esetNod32Antivirus.pkgid.pkg,"
        "com.eset.esetNod32Antivirus.com.eset.esets_daemon.pkg,"
        "com.eset.esetNod32Antivirus.esetsbkp.pkg,"
        "com.eset.esetNod32Antivirus.esets_kac_64_106.pkg")
    # echo $(( $(date --date="2017-07-20T18:40:22Z" +"%s") * 1000000))
    self.assertEqual(packages[0].installed_on, 1500576022000000)
    self.assertEqual(packages[0].install_state,
                     rdf_client.SoftwarePackage.InstallState.INSTALLED)
    # old grr agent
    self.assertEqual(packages[1].name, "grr")
    self.assertEqual(packages[1].version, "")
    self.assertEqual(packages[1].description, "com.google.code.grr.grr_3.2.1.0")
    # echo $(( $(date --date="2018-03-13T05:39:17Z" +"%s") * 1000000))
    self.assertEqual(packages[1].installed_on, 1520919557000000)
    self.assertEqual(packages[1].install_state,
                     rdf_client.SoftwarePackage.InstallState.INSTALLED)
    # new grr agent
    self.assertEqual(packages[2].name, "grr")
    self.assertEqual(packages[2].version, "")
    self.assertEqual(packages[2].description, "com.google.code.grr.grr_3.2.3.2")
    # echo $(( $(date --date="2018-08-07T16:07:10Z" +"%s") * 1000000))
    self.assertEqual(packages[2].installed_on, 1533658030000000)
    self.assertEqual(packages[2].install_state,
                     rdf_client.SoftwarePackage.InstallState.INSTALLED)
    # Sierra
    self.assertEqual(packages[3].name, "macOS Sierra Update")
    self.assertEqual(packages[3].version, "10.12.6")
    self.assertEqual(
        packages[3].description, "com.apple.pkg.update.os.10.12.6Patch.16G29,"
        "com.apple.pkg.FirmwareUpdate,"
        "com.apple.update.fullbundleupdate.16G29,"
        "com.apple.pkg.EmbeddedOSFirmware")
    # echo $(( $(date --date="2017-07-25T04:26:10Z" +"%s") * 1000000))
    self.assertEqual(packages[3].installed_on, 1500956770000000)
    self.assertEqual(packages[3].install_state,
                     rdf_client.SoftwarePackage.InstallState.INSTALLED)
def main(argv):
  """Entry point: delegate to the GRR test runner."""
  # Run the full test suite
  test_lib.main(argv)
if __name__ == "__main__":
  app.run(main)
| {
"content_hash": "4a8dcf4329a3828dbbb6bbd5a98551dd",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 80,
"avg_line_length": 40.582352941176474,
"alnum_prop": 0.6831424844180316,
"repo_name": "demonchild2112/travis-test",
"id": "f4a1d90cd81f895ec8dec81de2f029ac84e99a11",
"size": "6957",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/core/grr_response_core/lib/parsers/osx_file_parser_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3446"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "35549"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HCL",
"bytes": "7208"
},
{
"name": "HTML",
"bytes": "190212"
},
{
"name": "JavaScript",
"bytes": "11691"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7213255"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "48882"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "51"
}
],
"symlink_target": ""
} |
import pendulum
from flexmock import flexmock, flexmock_teardown
from ... import OratorTestCase
from ...utils import MockConnection
from orator.query.builder import QueryBuilder
from orator.query.grammars import QueryGrammar
from orator.query.processors import QueryProcessor
from orator.query.expression import QueryExpression
from orator.orm.builder import Builder
from orator.orm.model import Model
from orator.orm.relations import BelongsToMany
from orator.orm.relations.pivot import Pivot
from orator.orm.collection import Collection
class OrmBelongsToTestCase(OratorTestCase):
    def tearDown(self):
        # Verify and reset all flexmock expectations after every test.
        flexmock_teardown()
    def test_models_are_properly_hydrated(self):
        """Rows with pivot_* columns are split into model attrs + Pivot models."""
        model1 = OrmBelongsToManyModelStub()
        model1.fill(name="john", pivot_user_id=1, pivot_role_id=2)
        model2 = OrmBelongsToManyModelStub()
        model2.fill(name="jane", pivot_user_id=3, pivot_role_id=4)
        models = [model1, model2]
        base_builder = flexmock(
            Builder(
                QueryBuilder(
                    MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
                )
            )
        )
        relation = self._get_relation()
        relation.get_parent().should_receive("get_connection_name").and_return(
            "foo.connection"
        )
        # get() must select the related columns plus aliased pivot columns
        relation.get_query().get_query().should_receive("add_select").once().with_args(
            *[
                "roles.*",
                "user_role.user_id AS pivot_user_id",
                "user_role.role_id AS pivot_role_id",
            ]
        ).and_return(relation.get_query())
        relation.get_query().should_receive("get_models").once().and_return(models)
        relation.get_query().should_receive("eager_load_relations").once().with_args(
            models
        ).and_return(models)
        relation.get_related().should_receive("new_collection").replace_with(
            lambda l: Collection(l)
        )
        relation.get_query().should_receive("get_query").once().and_return(base_builder)
        results = relation.get()
        self.assertIsInstance(results, Collection)
        # Make sure the foreign keys were set on the pivot models
        self.assertEqual("user_id", results[0].pivot.get_foreign_key())
        self.assertEqual("role_id", results[0].pivot.get_other_key())
        self.assertEqual("john", results[0].name)
        self.assertEqual(1, results[0].pivot.user_id)
        self.assertEqual(2, results[0].pivot.role_id)
        self.assertEqual("foo.connection", results[0].pivot.get_connection_name())
        self.assertEqual("jane", results[1].name)
        self.assertEqual(3, results[1].pivot.user_id)
        self.assertEqual(4, results[1].pivot.role_id)
        self.assertEqual("foo.connection", results[1].pivot.get_connection_name())
        self.assertEqual("user_role", results[0].pivot.get_table())
        self.assertTrue(results[0].pivot.exists)
    def test_timestamps_can_be_retrieved_properly(self):
        """with_timestamps() adds aliased created_at/updated_at pivot columns.

        NOTE(review): this test's verification is entirely in the ``once()``
        mock expectations (checked by flexmock_teardown); ``results`` is
        intentionally(?) unasserted -- consider asserting on it.
        """
        model1 = OrmBelongsToManyModelStub()
        model1.fill(name="john", pivot_user_id=1, pivot_role_id=2)
        model2 = OrmBelongsToManyModelStub()
        model2.fill(name="jane", pivot_user_id=3, pivot_role_id=4)
        models = [model1, model2]
        base_builder = flexmock(
            Builder(
                QueryBuilder(
                    MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
                )
            )
        )
        relation = self._get_relation().with_timestamps()
        relation.get_parent().should_receive("get_connection_name").and_return(
            "foo.connection"
        )
        # the two timestamp columns must now be part of the select list
        relation.get_query().get_query().should_receive("add_select").once().with_args(
            "roles.*",
            "user_role.user_id AS pivot_user_id",
            "user_role.role_id AS pivot_role_id",
            "user_role.created_at AS pivot_created_at",
            "user_role.updated_at AS pivot_updated_at",
        ).and_return(relation.get_query())
        relation.get_query().should_receive("get_models").once().and_return(models)
        relation.get_query().should_receive("eager_load_relations").once().with_args(
            models
        ).and_return(models)
        relation.get_related().should_receive("new_collection").replace_with(
            lambda l: Collection(l)
        )
        relation.get_query().should_receive("get_query").once().and_return(base_builder)
        results = relation.get()
def test_models_are_properly_matched_to_parents(self):
    """match() groups pivot results under each parent keyed by pivot.user_id."""
    relation = self._get_relation()
    result1 = OrmBelongsToManyModelPivotStub()
    result1.pivot.user_id = 1
    result2 = OrmBelongsToManyModelPivotStub()
    result2.pivot.user_id = 2
    result3 = OrmBelongsToManyModelPivotStub()
    result3.pivot.user_id = 2
    model1 = OrmBelongsToManyModelStub()
    model1.id = 1
    model2 = OrmBelongsToManyModelStub()
    model2.id = 2
    model3 = OrmBelongsToManyModelStub()
    model3.id = 3
    relation.get_related().should_receive("new_collection").replace_with(
        lambda l=None: Collection(l)
    )
    models = relation.match(
        [model1, model2, model3], Collection([result1, result2, result3]), "foo"
    )
    # model1 gets result1, model2 gets result2+result3, model3 gets nothing.
    self.assertEqual(1, models[0].foo[0].pivot.user_id)
    self.assertEqual(1, len(models[0].foo))
    self.assertEqual(2, models[1].foo[0].pivot.user_id)
    self.assertEqual(2, models[1].foo[1].pivot.user_id)
    self.assertEqual(2, len(models[1].foo))
    self.assertTrue(models[2].foo.is_empty())
def test_relation_is_properly_initialized(self):
    """init_relation() seeds each model with an empty Collection for the relation."""
    relation = self._get_relation()
    relation.get_related().should_receive("new_collection").replace_with(
        lambda l=None: Collection(l or [])
    )
    model = flexmock(Model())
    model.should_receive("set_relation").once().with_args("foo", Collection)
    models = relation.init_relation([model], "foo")
    self.assertEqual([model], models)
def test_eager_constraints_are_properly_added(self):
    """add_eager_constraints() applies a where_in over the pivot foreign key."""
    relation = self._get_relation()
    relation.get_query().get_query().should_receive("where_in").once().with_args(
        "user_role.user_id", [1, 2]
    )
    model1 = OrmBelongsToManyModelStub()
    model1.id = 1
    model2 = OrmBelongsToManyModelStub()
    model2.id = 2
    relation.add_eager_constraints([model1, model2])
def test_attach_inserts_pivot_table_record(self):
    """attach(id, attrs) inserts one pivot row with both keys plus the extras."""
    flexmock(BelongsToMany, touch_if_touching=lambda: True)
    relation = self._get_relation()
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    query.should_receive("insert").once().with_args(
        [{"user_id": 1, "role_id": 2, "foo": "bar"}]
    ).and_return(True)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    relation.should_receive("touch_if_touching").once()
    relation.attach(2, {"foo": "bar"})
def test_attach_multiple_inserts_pivot_table_record(self):
    """attach() with a mixed list merges per-id attributes with the shared ones."""
    flexmock(BelongsToMany, touch_if_touching=lambda: True)
    relation = self._get_relation()
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    query.should_receive("insert").once().with_args(
        [
            {"user_id": 1, "role_id": 2, "foo": "bar"},
            # id 3 carries its own {"bar": "baz"} on top of the shared attrs.
            {"user_id": 1, "role_id": 3, "bar": "baz", "foo": "bar"},
        ]
    ).and_return(True)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    relation.should_receive("touch_if_touching").once()
    relation.attach([2, {3: {"bar": "baz"}}], {"foo": "bar"})
# NOTE(review): "ncessary" is a typo for "necessary"; the name is kept because
# renaming a test changes its discoverable identifier.
def test_attach_inserts_pivot_table_records_with_timestamps_when_ncessary(self):
    """With with_timestamps(), attach() stamps created_at and updated_at on the row."""
    flexmock(BelongsToMany, touch_if_touching=lambda: True)
    relation = self._get_relation().with_timestamps()
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    now = pendulum.now()
    query.should_receive("insert").once().with_args(
        [
            {
                "user_id": 1,
                "role_id": 2,
                "foo": "bar",
                "created_at": now,
                "updated_at": now,
            }
        ]
    ).and_return(True)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    relation.get_parent().should_receive("fresh_timestamp").once().and_return(now)
    relation.should_receive("touch_if_touching").once()
    relation.attach(2, {"foo": "bar"})
def test_attach_inserts_pivot_table_records_with_a_created_at_timestamp(self):
    """with_pivot('created_at') alone makes attach() stamp only created_at."""
    flexmock(BelongsToMany, touch_if_touching=lambda: True)
    relation = self._get_relation().with_pivot("created_at")
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    now = pendulum.now()
    query.should_receive("insert").once().with_args(
        [{"user_id": 1, "role_id": 2, "foo": "bar", "created_at": now}]
    ).and_return(True)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    relation.get_parent().should_receive("fresh_timestamp").once().and_return(now)
    relation.should_receive("touch_if_touching").once()
    relation.attach(2, {"foo": "bar"})
def test_attach_inserts_pivot_table_records_with_an_updated_at_timestamp(self):
    """with_pivot('updated_at') alone makes attach() stamp only updated_at."""
    flexmock(BelongsToMany, touch_if_touching=lambda: True)
    relation = self._get_relation().with_pivot("updated_at")
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    now = pendulum.now()
    query.should_receive("insert").once().with_args(
        [{"user_id": 1, "role_id": 2, "foo": "bar", "updated_at": now}]
    ).and_return(True)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    relation.get_parent().should_receive("fresh_timestamp").once().and_return(now)
    relation.should_receive("touch_if_touching").once()
    relation.attach(2, {"foo": "bar"})
def test_detach_remove_pivot_table_record(self):
    """detach(ids) deletes pivot rows scoped to the parent and the given ids."""
    flexmock(BelongsToMany, touch_if_touching=lambda: True)
    relation = self._get_relation()
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    query.should_receive("where").once().with_args("user_id", 1).and_return(query)
    query.should_receive("where_in").once().with_args("role_id", [1, 2, 3])
    query.should_receive("delete").once().and_return(True)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    relation.should_receive("touch_if_touching").once()
    self.assertTrue(relation.detach([1, 2, 3]))
def test_detach_with_single_id_remove_pivot_table_record(self):
    """detach(scalar) wraps the single id in a one-element where_in list."""
    flexmock(BelongsToMany, touch_if_touching=lambda: True)
    relation = self._get_relation()
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    query.should_receive("where").once().with_args("user_id", 1).and_return(query)
    query.should_receive("where_in").once().with_args("role_id", [1])
    query.should_receive("delete").once().and_return(True)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    relation.should_receive("touch_if_touching").once()
    self.assertTrue(relation.detach(1))
def test_detach_clears_all_records_when_no_ids(self):
    """detach() with no ids deletes every pivot row of the parent (no where_in)."""
    flexmock(BelongsToMany, touch_if_touching=lambda: True)
    relation = self._get_relation()
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    query.should_receive("where").once().with_args("user_id", 1).and_return(query)
    # Key assertion: no id filter may be applied when clearing everything.
    query.should_receive("where_in").never()
    query.should_receive("delete").once().and_return(True)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    relation.should_receive("touch_if_touching").once()
    self.assertTrue(relation.detach())
def test_create_creates_new_model_and_insert_attachment_record(self):
    """create() builds and saves a related model, then attaches it via its key."""
    flexmock(BelongsToMany, attach=lambda: True)
    relation = self._get_relation()
    model = flexmock()
    relation.get_related().should_receive("new_instance").once().and_return(
        model
    ).with_args({"foo": "bar"})
    model.should_receive("save").once()
    model.should_receive("get_key").and_return("foo")
    relation.should_receive("attach").once().with_args("foo", {"bar": "baz"}, True)
    self.assertEqual(model, relation.create({"foo": "bar"}, {"bar": "baz"}))
def test_find_or_new_finds_model(self):
    """find_or_new() returns the found model and never builds a new instance."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    model = flexmock()
    model.foo = "bar"
    relation.get_query().should_receive("find").with_args("foo", None).and_return(
        model
    )
    relation.get_related().should_receive("new_instance").never()
    self.assertEqual("bar", relation.find_or_new("foo").foo)
def test_find_or_new_returns_new_model(self):
    """find_or_new() falls back to a fresh instance when find() yields nothing."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    model = flexmock()
    model.foo = "bar"
    relation.get_query().should_receive("find").with_args("foo", None).and_return(
        None
    )
    relation.get_related().should_receive("new_instance").once().and_return(model)
    self.assertEqual("bar", relation.find_or_new("foo").foo)
def test_first_or_new_finds_first_model(self):
    """first_or_new() returns the first match without building a new instance."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    model = flexmock()
    model.foo = "bar"
    relation.get_query().should_receive("where").with_args(
        {"foo": "bar"}
    ).and_return(relation.get_query())
    relation.get_query().should_receive("first").once().and_return(model)
    relation.get_related().should_receive("new_instance").never()
    self.assertEqual("bar", relation.first_or_new({"foo": "bar"}).foo)
def test_first_or_new_returns_new_model(self):
    """first_or_new() builds a fresh instance when the filter matches nothing."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    model = flexmock()
    model.foo = "bar"
    relation.get_query().should_receive("where").with_args(
        {"foo": "bar"}
    ).and_return(relation.get_query())
    relation.get_query().should_receive("first").once().and_return(None)
    relation.get_related().should_receive("new_instance").once().and_return(model)
    self.assertEqual("bar", relation.first_or_new({"foo": "bar"}).foo)
def test_first_or_create_finds_first_model(self):
    """first_or_create() returns the existing match and never calls create()."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    model = flexmock()
    model.foo = "bar"
    relation.get_query().should_receive("where").with_args(
        {"foo": "bar"}
    ).and_return(relation.get_query())
    relation.get_query().should_receive("first").once().and_return(model)
    relation.should_receive("create").never()
    self.assertEqual("bar", relation.first_or_create({"foo": "bar"}).foo)
def test_first_or_create_returns_new_model(self):
    """first_or_create() delegates to create(attrs, {}, True) on a miss."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    model = flexmock()
    model.foo = "bar"
    relation.get_query().should_receive("where").with_args(
        {"foo": "bar"}
    ).and_return(relation.get_query())
    relation.get_query().should_receive("first").once().and_return(None)
    relation.should_receive("create").once().with_args(
        {"foo": "bar"}, {}, True
    ).and_return(model)
    self.assertEqual("bar", relation.first_or_create({"foo": "bar"}).foo)
# NOTE(review): "mode" is a typo for "model"; the name is kept because renaming
# a test changes its discoverable identifier.
def test_update_or_create_finds_first_mode_and_updates(self):
    """update_or_create() fills and saves the existing match instead of creating."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    model = flexmock()
    model.foo = "bar"
    relation.get_query().should_receive("where").with_args(
        {"foo": "bar"}
    ).and_return(relation.get_query())
    relation.get_query().should_receive("first").once().and_return(model)
    model.should_receive("fill").once()
    model.should_receive("save").once()
    relation.should_receive("create").never()
    self.assertEqual("bar", relation.update_or_create({"foo": "bar"}).foo)
def test_update_or_create_returns_new_model(self):
    """update_or_create() creates from the *values* dict when nothing matches."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    model = flexmock()
    model.foo = "bar"
    relation.get_query().should_receive("where").with_args(
        {"foo": "bar"}
    ).and_return(relation.get_query())
    relation.get_query().should_receive("first").once().and_return(None)
    relation.should_receive("create").once().with_args(
        {"bar": "baz"}, None, True
    ).and_return(model)
    self.assertEqual(
        "bar", relation.update_or_create({"foo": "bar"}, {"bar": "baz"}).foo
    )
def test_sync_syncs_intermediate_table_with_given_list(self):
    """sync() attaches new ids, detaches missing ones — with int or str id lists."""
    # Run the scenario twice: once with integer ids, once with string ids.
    for list_ in [[2, 3, 4], ["2", "3", "4"]]:
        flexmock(BelongsToMany)
        relation = self._get_relation()
        query = flexmock()
        query.should_receive("from_").once().with_args("user_role").and_return(
            query
        )
        query.should_receive("where").once().with_args("user_id", 1).and_return(
            query
        )
        mock_query_builder = flexmock()
        relation.get_query().should_receive("get_query").and_return(
            mock_query_builder
        )
        mock_query_builder.should_receive("new_query").once().and_return(query)
        # Currently attached: 1 plus the first two requested ids.
        query.should_receive("lists").once().with_args("role_id").and_return(
            Collection([1, list_[0], list_[1]])
        )
        relation.should_receive("attach").once().with_args(list_[2], {}, False)
        relation.should_receive("detach").once().with_args([1])
        relation.get_related().should_receive("touches").and_return(False)
        relation.get_parent().should_receive("touches").and_return(False)
        self.assertEqual(
            {"attached": [list_[2]], "detached": [1], "updated": []},
            relation.sync(list_),
        )
def test_sync_syncs_intermediate_table_with_given_list_and_attributes(self):
    """sync() with per-id attrs updates existing pivots and reports them as updated."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    query.should_receive("where").once().with_args("user_id", 1).and_return(query)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    query.should_receive("lists").once().with_args("role_id").and_return(
        Collection([1, 2, 3])
    )
    relation.should_receive("attach").once().with_args(4, {"foo": "bar"}, False)
    # id 3 is already attached, so its attributes go through an update instead.
    relation.should_receive("update_existing_pivot").once().with_args(
        3, {"bar": "baz"}, False
    ).and_return(True)
    relation.should_receive("detach").once().with_args([1])
    relation.should_receive("touch_if_touching").once()
    relation.get_related().should_receive("touches").and_return(False)
    relation.get_parent().should_receive("touches").and_return(False)
    self.assertEqual(
        {"attached": [4], "detached": [1], "updated": [3]},
        relation.sync([2, {3: {"bar": "baz"}}, {4: {"foo": "bar"}}]),
    )
def test_sync_does_not_return_values_that_were_not_updated(self):
    """sync() omits ids from 'updated' when update_existing_pivot() returns False."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    query.should_receive("where").once().with_args("user_id", 1).and_return(query)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    query.should_receive("lists").once().with_args("role_id").and_return(
        Collection([1, 2, 3])
    )
    relation.should_receive("attach").once().with_args(4, {"foo": "bar"}, False)
    # The pivot update is a no-op, so id 3 must NOT show up in "updated".
    relation.should_receive("update_existing_pivot").once().with_args(
        3, {"bar": "baz"}, False
    ).and_return(False)
    relation.should_receive("detach").once().with_args([1])
    relation.should_receive("touch_if_touching").once()
    relation.get_related().should_receive("touches").and_return(False)
    relation.get_parent().should_receive("touches").and_return(False)
    self.assertEqual(
        {"attached": [4], "detached": [1], "updated": []},
        relation.sync([2, {3: {"bar": "baz"}}, {4: {"foo": "bar"}}]),
    )
def test_touch_method_syncs_timestamps(self):
    """touch() bulk-updates updated_at on every related row via a where_in."""
    relation = self._get_relation()
    relation.get_related().should_receive("get_updated_at_column").and_return(
        "updated_at"
    )
    now = pendulum.now()
    relation.get_related().should_receive("fresh_timestamp").and_return(now)
    relation.get_related().should_receive("get_qualified_key_name").and_return(
        "table.id"
    )
    relation.get_query().get_query().should_receive("select").once().with_args(
        "table.id"
    ).and_return(relation.get_query().get_query())
    # The related ids collected here drive the where_in below.
    relation.get_query().should_receive("lists").once().and_return(
        Collection([1, 2, 3])
    )
    query = flexmock()
    relation.get_related().should_receive("new_query").once().and_return(query)
    query.should_receive("where_in").once().with_args("id", [1, 2, 3]).and_return(
        query
    )
    query.should_receive("update").once().with_args({"updated_at": now})
    relation.touch()
def test_touch_if_touching(self):
    """touch_if_touching() touches both parent and relation when each opts in."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    relation.should_receive("_touching_parent").once().and_return(True)
    relation.get_parent().should_receive("touch").once()
    relation.get_parent().should_receive("touches").once().with_args(
        "relation_name"
    ).and_return(True)
    relation.should_receive("touch").once()
    relation.touch_if_touching()
def test_sync_method_converts_collection_to_list_of_keys(self):
    """sync() accepts a Collection and converts it via model_keys()."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    query = flexmock()
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    query.should_receive("where").once().with_args("user_id", 1).and_return(query)
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    query.should_receive("lists").once().with_args("role_id").and_return(
        Collection([1, 2, 3])
    )
    collection = flexmock(Collection())
    # Key assertion: the Collection must be reduced to its model keys.
    collection.should_receive("model_keys").once().and_return([1, 2, 3])
    relation.should_receive("_format_sync_list").with_args([1, 2, 3]).and_return(
        {1: {}, 2: {}, 3: {}}
    )
    relation.sync(collection)
def test_where_pivot_params_used_for_new_queries(self):
    """where_pivot() constraints must carry over to fresh pivot queries in sync()."""
    flexmock(BelongsToMany)
    relation = self._get_relation()
    relation.get_query().should_receive("where").once().and_return(relation)
    query = flexmock()
    mock_query_builder = flexmock()
    relation.get_query().should_receive("get_query").and_return(mock_query_builder)
    mock_query_builder.should_receive("new_query").once().and_return(query)
    query.should_receive("from_").once().with_args("user_role").and_return(query)
    query.should_receive("where").once().with_args("user_id", 1).and_return(query)
    # The pivot constraint registered via where_pivot() below.
    query.should_receive("where").once().with_args(
        "foo", "=", "bar", "and"
    ).and_return(query)
    query.should_receive("lists").once().with_args("role_id").and_return(
        Collection([1, 2, 3])
    )
    relation.should_receive("_format_sync_list").with_args([1, 2, 3]).and_return(
        {1: {}, 2: {}, 3: {}}
    )
    relation = relation.where_pivot("foo", "=", "bar")
    relation.sync([1, 2, 3])
def _get_relation(self):
    """Build a BelongsToMany relation wired to the mocked builder and parent."""
    args = self._get_relation_arguments()
    return BelongsToMany(
        args[0], args[1], "user_role", "user_id", "role_id", "relation_name"
    )
def _get_relation_arguments(self):
    """Assemble the mocked (builder, parent, table, fk, other_key, name) tuple.

    Also registers the join/where expectations every relation construction
    is expected to trigger.
    """
    flexmock(Model).should_receive("_boot_columns").and_return(["name"])
    parent = flexmock(Model())
    parent.should_receive("get_key").and_return(1)
    parent.should_receive("get_created_at_column").and_return("created_at")
    parent.should_receive("get_updated_at_column").and_return("updated_at")
    query = flexmock(
        QueryBuilder(
            MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
        )
    )
    flexmock(Builder)
    builder = Builder(query)
    builder.should_receive("get_query").and_return(query)
    related = flexmock(Model())
    builder.set_model(related)
    builder.should_receive("get_model").and_return(related)
    related.should_receive("new_query").and_return(builder)
    related.should_receive("get_key_name").and_return("id")
    related.should_receive("get_table").and_return("roles")
    related.should_receive("new_pivot").replace_with(lambda *args: Pivot(*args))
    # Constructing the relation must join the pivot table and scope the parent.
    builder.get_query().should_receive("join").at_least().once().with_args(
        "user_role", "roles.id", "=", "user_role.role_id"
    )
    builder.should_receive("where").at_least().once().with_args(
        "user_role.user_id", "=", 1
    )
    # NOTE(review): the last element is "relation_id" while _get_relation uses
    # "relation_name"; only the first two elements are consumed there.
    return builder, parent, "user_role", "user_id", "role_id", "relation_id"
class OrmBelongsToManyModelStub(Model):
    """Unguarded model stub: tests may mass-assign any attribute via fill()."""

    __guarded__ = []
class OrmBelongsToManyModelPivotStub(Model):
    """Model stub carrying a plain pivot object, like hydrated pivot results."""

    __guarded__ = []

    def __init__(self):
        super(OrmBelongsToManyModelPivotStub, self).__init__()
        # Each instance gets its own pivot holder for per-test attribute writes.
        self.pivot = OrmBelongsToManyPivotStub()
class OrmBelongsToManyPivotStub(object):
    """Bare attribute holder standing in for a real Pivot model."""

    pass
| {
"content_hash": "fc5661918e576ae85c551d29c6ce86b2",
"timestamp": "",
"source": "github",
"line_count": 655,
"max_line_length": 88,
"avg_line_length": 42.577099236641224,
"alnum_prop": 0.6040949512335054,
"repo_name": "sdispater/orator",
"id": "ef3a858833f84fb9e2d1779ad219868e348bcf67",
"size": "27914",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.9",
"path": "tests/orm/relations/test_belongs_to_many.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2170"
},
{
"name": "Python",
"bytes": "1013569"
}
],
"symlink_target": ""
} |
import pytest
from UnofficialDDNS import __doc__ as uddns_doc
from UnofficialDDNS import __version__ as uddns_ver
from docopt import docopt
import libs
def test_config_file_and_cli_complimentary_with_full_valid_data(config_file):
    """Config-file value (domain) merges with CLI values (user/passwd)."""
    config_file.write("domain: mydomain.com")
    config_file.flush()  # make the temp file content visible to get_config
    argv = ['-c', config_file.name, '-u', 'usera', '-p', 'pass']
    expected = dict(log=None, daemon=False, verbose=False, interval=60, pid=None, quiet=False, version=False,
                    registrar='name.com', config=config_file.name, help=False,
                    user='usera', passwd='pass', domain='mydomain.com')
    actual = libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
    assert expected == actual
def test_config_file_and_cli_overlapping_with_full_valid_data(config_file):
    """When both sources set the domain, the config file wins over CLI -n."""
    config_file.write("domain: mydomain2.com")
    config_file.flush()
    argv = ['-c', config_file.name, '-n', 'abc.com', '-u', 'usera', '-p', 'pass']
    expected = dict(log=None, daemon=False, verbose=False, interval=60, pid=None, quiet=False, version=False,
                    registrar='name.com', config=config_file.name, help=False,
                    user='usera', passwd='pass', domain='mydomain2.com')
    actual = libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
    assert expected == actual
def test_config_file_and_cli_overlapping_with_incomplete_data(config_file):
    """Missing password raises ConfigError even with domain and user present."""
    config_file.write("domain: mydomain3.com")
    config_file.flush()
    argv = ['-c', config_file.name, '-n', 'abc.com', '-u', 'usera']
    with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
        libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
    assert "A domain, username, and password must be specified." == str(e.value)
def test_cli_invalid_options():
    """docopt exits the process when a flag option (-d) is given an argument."""
    cli_args = ['-n', 'test.com', '-p', 'testpw', '-u', 'testuser', '-d', 'shouldBeFlag']
    with pytest.raises(SystemExit):
        libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=cli_args))
def test_cli_interval_fail():
    """-i must be a positive integer: non-numeric and zero both raise ConfigError."""
    argv = ['-n', 'test.com', '-p', 'testpw', '-u', 'testuser', '-i', 'shouldBeNum']
    with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
        libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
    assert "Config option 'interval' must be a number." == str(e.value)
    # Zero is numeric but not a valid polling interval.
    argv = ['-n', 'test.com', '-p', 'testpw', '-u', 'testuser', '-i', '0']
    with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
        libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
    assert "Config option 'interval' must be greater than 0." == str(e.value)
def test_cli_pass():
    """A complete, valid command line parses into the expected config dict."""
    cli_args = ['-n', 'test.com', '-p', 'testpw', '-u', 'testuser']
    parsed = libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=cli_args))
    assert parsed == dict(
        log=None, daemon=False, verbose=False, interval=60, pid=None,
        quiet=False, version=False, registrar='name.com', config=None,
        help=False, user='testuser', passwd='testpw', domain='test.com',
    )
| {
"content_hash": "e55e9d61e230cc2218956eff8b4012aa",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 109,
"avg_line_length": 49.62903225806452,
"alnum_prop": 0.6509587260318492,
"repo_name": "Robpol86/UnofficialDDNSnix",
"id": "5f6335db731df8a59718d6d659194fd0a22352e1",
"size": "3102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_config/test_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "384106"
},
{
"name": "Shell",
"bytes": "8707"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from ..views import QuoteList
# Single route: the collection root is served by the QuoteList class-based view.
urlpatterns = [
    url(r'^$', QuoteList.as_view(), name='quote_list'),
]
| {
"content_hash": "c3a4b5a2a193c876bb6776d83127c3f3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 17.5,
"alnum_prop": 0.6714285714285714,
"repo_name": "onepercentclub/bluebottle",
"id": "4cd426ffdca9d252e5fb7d6f2e98d87d777fa0e1",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/quotes/urls/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
"""
Created on 2015-06-13
@author: lujin
"""
from django.conf.urls import url
from app.setup import views
# Setup pages: site-wide settings and personal settings views.
urlpatterns = [
    url(r'^website/$', views.website),
    url(r'^personal/$', views.personal),
] | {
"content_hash": "d7da60e34d3a613b5179ae1ac3c1365d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 40,
"avg_line_length": 17.083333333333332,
"alnum_prop": 0.6682926829268293,
"repo_name": "myangeline/rorobot",
"id": "4948fde4c123b705f86ced28d3d1cca6b7ef91cf",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/setup/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "555857"
},
{
"name": "HTML",
"bytes": "114250"
},
{
"name": "JavaScript",
"bytes": "2117838"
},
{
"name": "PHP",
"bytes": "43361"
},
{
"name": "Python",
"bytes": "19374"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.http import Http404
from django.forms import EmailField
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
import celery
import random
import time
import proj.settings
from applications.product.models import Country
from .tasks import delivery_order, recompile_order
from applications.sms_ussd.tasks import send_template_sms
__author__ = 'AlexStarov'
def ordering_step_one(request,
                      template_name=u'order/step_one.jinja2', ):
    """First checkout step: render the contact form and apply cart edits on POST."""
    from .models import Product
    try:
        country_list = Country.objects.all()
    except Country.DoesNotExist:
        raise Http404
    # Pre-fill the form from any values remembered in the session.
    FIO = request.session.get(u'FIO', None, )
    email = request.session.get(u'email', None, )
    phone = request.session.get(u'phone', None, )
    select_country = request.session.get(u'select_country', None, )
    if request.method == 'POST':
        POST_NAME = request.POST.get(u'POST_NAME', None, )
        if POST_NAME == 'order_cart':
            """ Взять корзину """
            # ("Fetch the cart.") If one had to be created here, the checkout
            # flow was entered out of order -> redirect to the error page.
            product_cart, create = get_cart_or_create(request, )
            if create:
                return redirect(to=u'/заказ/вы-где-то-оступились/', )
            try:
                """ Выборка всех продуктов из корзины """
                # ("Select every product in the cart.")
                products_in_cart = product_cart.cart.all()
            except Product.DoesNotExist:
                """ Странно!!! В корзине нету продуктов!!! """
                # ("Odd -- there are no products in the cart!")
                return redirect(to='cart:show_cart', )
            else:
                for product_in_cart in products_in_cart:
                    """ Нужно проверить, есть ли вообще такой продукт в корзине? """
                    # ("Check whether this product was actually posted back.")
                    product_in_request = request.POST.get(u'product_in_request_%d' % product_in_cart.pk, None, )
                    try:
                        product_in_request = int(product_in_request, )
                    except (ValueError, TypeError, ):
                        continue
                    if product_in_request == product_in_cart.pk:
                        product_del = request.POST.get(u'delete_%d' % product_in_cart.pk, None, )
                        if product_del:
                            # NOTE(review): looks like a property accessed purely
                            # for its side effect (removing the cart line) --
                            # confirm against the cart model definition.
                            product_in_cart.product_delete
                            continue
                        product_quantity = request.POST.get(u'quantity_%d' % product_in_cart.pk, None, )
                        if product_quantity != product_in_cart.quantity:
                            product_in_cart.update_quantity(product_quantity, )
                            continue
                    else:
                        continue
    return render(request=request,
                  template_name=template_name,
                  context={'form_action_next': u'/заказ/второй-шаг/',
                           'FIO': FIO,
                           'email': email,
                           'phone': phone,
                           'country_list': country_list,
                           'select_country': select_country, },
                  content_type='text/html', )
def ordering_step_two(request,
                      template_name=u'order/step_two_ua.jinja2', ):
    """Second checkout step: validate contact data and create/reuse the Order.

    Expects the POST produced by step one. Stores FIO/phone/email and the
    selected country in the session, finds or creates an Order row bound to
    the current session, then renders the delivery-details form.
    """
    from .models import Order, DeliveryCompany
    FIO = request.POST.get(u'FIO', False, )
    if FIO:
        request.session[u'FIO'] = FIO.strip()
    email = request.POST.get(u'email', False, )
    email_error = False
    phone = request.POST.get(u'phone', False, )
    if phone:
        request.session[u'phone'] = phone.strip()
    region, settlement, address, postcode = False, False, False, False
    country = request.POST.get(u'select_country', None, )
    try:
        country_list = Country.objects.all()
    except Country.DoesNotExist:
        raise Http404
    else:
        try:
            select_country = int(country, )
        except (ValueError, TypeError):
            raise Http404
        else:
            # NOTE(review): an unknown pk raises an uncaught Country.DoesNotExist
            # (HTTP 500) here -- confirm whether a 404 was intended.
            country = country_list.get(pk=select_country, )
    if country:
        request.session[u'select_country'] = select_country
        if select_country == 1:
            # Ukraine: delivery is addressed by region + settlement.
            region = request.session.get(u'region', False, )
            settlement = request.session.get(u'settlement', False, )
        else:
            # Any other country: free-form address + postcode template.
            template_name = u'order/step_two_others.jinja2'
            address = request.session.get(u'address', False, )
            postcode = request.session.get(u'postcode', False, )
    if request.method == 'POST':
        POST_NAME = request.POST.get(u'POST_NAME', None, )
        if POST_NAME == 'ordering_step_one':
            # Validate the e-mail address before accepting the order data.
            if email:
                email = email.lower().strip(' ').replace(' ', '', )
                # Only syntactic validation remains; the MX/existence probes
                # that used to live here were disabled.
                try:
                    EmailField().clean(email, )
                except ValidationError:
                    email_error = u'Ваш E-Mail адрес не существует.'
                if not email_error:
                    request.session[u'email'] = email
                    # Fetch the visitor's cart; having to *create* one at this
                    # point means the flow was entered out of order.
                    cart, create = get_cart_or_create(request, )
                    if create:
                        return redirect(to=u'/заказ/вы-где-то-оступились/', )
                    if request.user.is_authenticated() and request.user.is_active:
                        user_id = request.session.get(u'_auth_user_id', None, )
                    sessionid = request.COOKIES.get(u'sessionid', None, )
                    request.session[u'cart_pk'] = cart.pk
                    # Normalize order_pk / order_pk_last from the session: keep
                    # each local only when it is a usable int; otherwise unbind
                    # it so the `in locals()` checks below fail.
                    order_pk = request.session.get(u'order_pk', False, )
                    if order_pk:
                        try:
                            order_pk = int(order_pk, )
                        except ValueError:
                            del order_pk
                    else:
                        del order_pk
                    order_pk_last = request.session.get(u'order_pk_last', False, )
                    if order_pk_last:
                        try:
                            order_pk_last = int(order_pk_last, )
                        except ValueError:
                            del order_pk_last
                    else:
                        del order_pk_last
                    # A pk equal to the last completed order means "start a new
                    # order", so discard the stale pk.
                    if 'order_pk' in locals()\
                            and 'order_pk_last' in locals()\
                            and order_pk == order_pk_last:
                        del order_pk
                    if 'order_pk' in locals() and order_pk and type(order_pk) == int:
                        # Reuse the existing order only when every contact field
                        # still matches what was stored for it.
                        q = Q(pk=order_pk,
                              sessionid=sessionid,
                              FIO=FIO,
                              email=email,
                              phone=phone,
                              country_id=select_country, )
                        if request.user.is_authenticated() and request.user.is_active:
                            q = q & Q(user_id=user_id, )
                        try:
                            order = Order.objects.get(q)
                        except Order.DoesNotExist:
                            pass
                    if 'order' not in locals():
                        order = Order(sessionid=sessionid, FIO=FIO, email=email,
                                      phone=phone, country_id=select_country, )
                        if request.user.is_authenticated() and request.user.is_active:
                            order.user_id = user_id
                        order.save()
                    request.session[u'order_pk'] = order.pk
            else:
                email_error = u'Вы забыли указать Ваш E-Mail.'
            if email_error:
                # Re-render step one with the error message.
                template_name = u'order/step_one.jinja2'
        else:
            return redirect(to=u'/заказ/вы-где-то-оступились/', )
    else:
        return redirect(to=u'/заказ/вы-где-то-оступились/', )
    try:
        delivery_companies_list = DeliveryCompany.objects.all()
    # BUG FIX: this guard previously caught Country.DoesNotExist, which the
    # DeliveryCompany queryset can never raise.
    except DeliveryCompany.DoesNotExist:
        raise Http404
    return render(request=request,
                  template_name=template_name,
                  context={'form_action_next': u'/заказ/результат-оформления/',
                           'delivery_companies_list': delivery_companies_list,
                           'country_list': country_list,
                           'FIO': FIO,
                           'email': email,
                           'email_error': email_error,
                           'phone': phone,
                           'select_country': country,
                           'region': region,
                           'settlement': settlement,
                           'address': address,
                           'postcode': postcode, },
                  content_type='text/html', )
def result_ordering(request, ):
    """Handle checkout step two: save delivery details and dispatch the order.

    POST-only view.  Expects ``POST_NAME == 'ordering_step_two'`` plus the
    delivery fields for the country chosen in step one; on success moves the
    cart contents onto the order, fires the notification tasks and redirects
    to the "successful" page.  Any invalid state redirects to "unsuccessful".

    NOTE(review): ``cache``, ``redirect``, ``time``, ``random``,
    ``ContentType``, ``get_cart_or_create``, ``delivery_order``,
    ``send_template_sms``, ``recompile_order``, ``proj`` and ``celery`` come
    from module-level imports not visible in this chunk.
    """
    from .models import Order, Product
    if request.method == 'POST':
        POST_NAME = request.POST.get(u'POST_NAME', None, )
        if POST_NAME == 'ordering_step_two':
            sessionid = request.COOKIES.get(u'sessionid', None, )
            # print(POST_NAME, sessionid)
            # Double-submit guard: a short-lived cache flag keyed by session id.
            if not cache.get(key='order_%s' % sessionid, ):
                """ Берем случайное значение паузы от 0 до одной секунды для того,
                что-бы пользователи которые жмут по два раза не успели уйти со страницы. """
                # (Original note: sleep a random 0..1s so a double click is
                # caught by the second cache check below.)
                time.sleep(random.uniform(0, 1))
                if not cache.get(key='order_%s' % sessionid, ):
                    cache.set(
                        key='order_%s' % sessionid,
                        value=True,
                        timeout=15, )
                else:
                    return redirect(to='order:already_processing_ru', permanent=True, )
            else:
                return redirect(to='order:already_processing_ru', permanent=True, )
            # FIO = request.session.get(u'FIO', None, )
            # email = request.session.get(u'email', None, )
            # phone = request.session.get(u'phone', None, )
            select_country = request.session.get(u'select_country', None, )
            try:
                order_pk = int(request.session.get(u'order_pk', None, ), )
                try:
                    order = Order.objects.get(pk=order_pk, )
                except Order.DoesNotExist:
                    return redirect(to='order:unsuccessful_ru', permanent=True, )
            except (TypeError, ValueError):
                # No usable order pk in the session (missing or non-numeric).
                return redirect(to='order:unsuccessful_ru', permanent=True, )
            if select_country == 1:
                """ Страна Украина """
                # Ukraine branch: region/settlement + delivery company + warehouse.
                region = request.POST.get(u'region', None, )
                order.region = region
                request.session[u'region'] = region
                settlement = request.POST.get(u'settlement', None, )
                order.settlement = settlement
                request.session[u'settlement'] = settlement
                delivery_company = request.POST.get(u'select_delivery_company', None, )
                try:
                    delivery_company = int(delivery_company, )
                except (TypeError, ValueError, ):
                    # Fall back to delivery company #1 on a missing/bad value.
                    delivery_company = 1
                # from apps.cart.models import DeliveryCompany
                # try:
                #     delivery_company = DeliveryCompany.objects.get(select_number=delivery_company, )
                # except DeliveryCompany.DoesNotExist:
                #     delivery_company = None
                order.delivery_company_id = delivery_company
                order.warehouse_number = request.POST.get(u'warehouse_number', None, )
                # NOTE(review): bool() of any non-empty POST string is True, so
                # the submitted checkbox value may not round-trip as intended
                # (and an absent 'choice1' defaults to True) -- confirm.
                order.checkbox1 = bool(request.POST.get(u'choice1', True, ), )
                order.checkbox2 = bool(request.POST.get(u'choice2', False, ), )
            else:
                """ для любого другого Государства """
                # Any other country: free-form address + postcode.
                address = request.POST.get(u'address', None, )
                order.address = address
                request.session[u'address'] = address
                postcode = request.POST.get(u'postcode', None, )
                order.postcode = postcode
                request.session[u'postcode'] = postcode
            order.comment = request.POST.get(u'comment', None, )
            order.save()
            cart, create = get_cart_or_create(request, )
            if create:
                # A freshly created cart means there was nothing to order.
                return redirect(to='order:unsuccessful_ru', permanent=True, )
            try:
                """ Выборка всех продуктов из корзины """
                all_products = cart.cart.all()
            except Product.DoesNotExist:
                """ Странно!!! В корзине нету продуктов!!! """
                return redirect(to='cart:show_cart', )
            else:
                """ Берем указатель на model заказ """
                ContentType_Order = ContentType.objects.get_for_model(Order, )
                """ Перемещение всех продуктов из корзины в заказ """
                """ Просто меняем 2-а поля назначения у всех продуктов в этой корзине """
                # Re-point every cart item's generic FK at the order.
                all_products.update(content_type=ContentType_Order, object_id=order.pk, )
                """ Переносим ссылающийся купон с "корзины" на "заказ" """
                coupons = cart.Cart_child.all()
                if len(coupons) == 1:
                    """ Удаляем ссылку на "корзину" """
                    coupons[0].child_cart.remove(cart, )
                    """ Добавляем ссылку на "заказ" """
                    coupons[0].child_order.add(order, )
                    print('Coupon_key: ', coupons[0].key)
                """ Удаляем старую корзину """
                cart.delete()
                """ Отправляем менеджеру заказ с описанием
                а пользователя благодарное письмо номером заказа и предварительной суммой
                и просьбой подождать звонка менеджера для уточнения заказа """
                # Async: e-mail the order to the manager and a confirmation to the client.
                delivery_order.apply_async(
                    queue='delivery_send',
                    kwargs={'order_pk': order.pk,
                            'email_template_name_to_admin': proj.settings.EMAIL_TEMPLATE_NAME['SEND_ORDER_TO_ADMIN'],
                            'email_template_name_to_client': proj.settings.EMAIL_TEMPLATE_NAME['SEND_ORDER_NUMBER'], },
                    task_id='celery-task-id-delivery_order-{0}'.format(celery.utils.uuid(), ),
                )
                # Strip formatting and country prefixes down to the 9
                # significant digits of a Ukrainian mobile number.
                # NOTE(review): str.lstrip strips *characters*, not prefixes --
                # leading digits 0/3/8 are removed greedily, so numbers whose
                # significant part starts with those digits may be mangled.
                phone = order.phone.lstrip('+').replace('(', '').replace(')', '')\
                    .replace(' ', '').replace('-', '').replace('.', '').replace(',', '') \
                    .lstrip('380').lstrip('38').lstrip('80').lstrip('0')
                if len(phone, ) == 9:
                    send_template_sms.apply_async(
                        queue='delivery_send',
                        kwargs={
                            'sms_to_phone_char': '+380%s' % phone[:9],
                            'sms_template_name': proj.settings.SMS_TEMPLATE_NAME['SEND_ORDER_NUMBER'],
                            'sms_order_number': order.number,
                        },
                        task_id='celery-task-id-send_template_sms-{0}'.format(celery.utils.uuid(), ),
                    )
                # Remember the finished order so step one can detect re-submits.
                request.session[u'order_pk_last'] = order.pk
                recompile_order.apply_async(
                    queue='celery',
                    kwargs={'order_pk': order.pk, },
                    task_id='celery-task-id-recompile_order-{0}'.format(celery.utils.uuid(), ),
                )
                return redirect(to='order:successful_ru', permanent=True, )
        else:
            return redirect(to='order:unsuccessful_ru', permanent=True, )
    else:
        return redirect(to='order:unsuccessful_ru', permanent=True, )
def order_success(request,
                  template_name=u'order/successful.jinja2', ):
    """Render the "order placed" page for the most recently finished order.

    Reads ``order_pk_last`` from the session; when it is absent the visitor
    is redirected to the "unsuccessful" page.  A stale or non-numeric pk is
    tolerated: the template is still rendered, just without an order.
    """
    from .models import Order
    order_pk = request.session.get(u'order_pk_last', None, )
    if order_pk is None:
        # Nothing was ordered in this session.
        return redirect(to='order:unsuccessful_ru', )
    order = None
    try:
        order_pk = int(order_pk, )
    except ValueError:
        # Corrupt session value -- render the page without an order.
        order_pk = None
    else:
        try:
            order = Order.objects.get(pk=order_pk, )
        except Order.DoesNotExist:
            # The order has disappeared since it was placed.
            order_pk = None
    return render(request=request,
                  template_name=template_name,
                  context={'order_pk': order_pk,
                           'order': order, },
                  content_type='text/html', )
def get_cart_or_create(request, user_object=False, created=True, ):
    """Return the Cart for the current session/user, optionally creating it.

    Args:
        request: the current HttpRequest (session id comes from its cookies).
        user_object: an explicit user to attach the cart to; when falsy the
            authenticated user is resolved from the session (or None).
        created (bool): when True (default), use ``get_or_create`` and return
            a ``(cart, created)`` tuple; when False, only look the cart up and
            return the Cart instance or None.

    Returns:
        (Cart, bool) when ``created`` is True, otherwise Cart or None.

    Bug fix: the original ended with ``return cart`` followed by an
    unreachable ``return cart, created``, so it always returned a single
    Cart -- while the caller in result_ordering unpacks two values
    (``cart, create = get_cart_or_create(request, )``).  The tuple is now
    returned on the ``created`` path.  Also, ``int(None)`` raises TypeError,
    not ValueError, so a missing ``_auth_user_id`` is now handled too.
    """
    from .models import Cart
    sessionid = request.COOKIES.get(u'sessionid', None, )
    if not user_object:
        user_object = None
        if request.user.is_authenticated() and request.user.is_active:
            user_id_ = request.session.get(u'_auth_user_id', None, )
            try:
                user_id_ = int(user_id_, )
                # NOTE(review): a stale user id still raises
                # get_user_model().DoesNotExist here -- confirm that is intended.
                user_object = get_user_model().objects.get(pk=user_id_, )
            except (TypeError, ValueError):
                # Session key missing (int(None) -> TypeError) or not numeric.
                user_object = None
    if created:
        cart, created = Cart.objects.get_or_create(user=user_object,
                                                   sessionid=sessionid, )
        return cart, created
    try:
        cart = Cart.objects.get(user=user_object,
                                sessionid=sessionid, )
    except Cart.DoesNotExist:
        cart = None
    return cart
| {
"content_hash": "4212dd4ca3a5eeb0f9e825d1642be386",
"timestamp": "",
"source": "github",
"line_count": 446,
"max_line_length": 119,
"avg_line_length": 44.026905829596416,
"alnum_prop": 0.4833469138317376,
"repo_name": "AlexStarov/Shop",
"id": "7cdb0def004c2297cad0536a727814c10c324079",
"size": "20814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/cart/order.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "268281"
},
{
"name": "HTML",
"bytes": "138853"
},
{
"name": "JavaScript",
"bytes": "10629133"
},
{
"name": "PHP",
"bytes": "14"
},
{
"name": "Python",
"bytes": "1532862"
},
{
"name": "Shell",
"bytes": "2089"
}
],
"symlink_target": ""
} |
"""
Python Enumerations
NOTE: This module provides enum support for Python 2.7. Technically,
this module could be made a dependency of the symoro package. However,
in order to reduce the number of dependencies, the source code is
included here. See below under the LICENSE section for further details
on usage of this file.
######################### LICENSE begins ##############################
Copyright (c) 2013, Ethan Furman.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
Neither the name Ethan Furman nor the names of any
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
########################### LICENSE ends ##############################
"""
import sys as _sys
# Public API of this vendored enum backport.
__all__ = ['Enum', 'IntEnum', 'unique']

# Interpreter version as a float, e.g. 2.7 or 3.4, used for 2/3 branching.
# NOTE: float('3.10') == 3.1, so per-minor-version comparisons would be wrong
# on 3.10+; the only comparisons made here (< 2.6, < 3.0, >= 3.0) still work.
pyver = float('%s.%s' % _sys.version_info[:2])

try:
    any
except NameError:
    # Fallback for interpreters predating the any() builtin (< 2.5).
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

try:
    from collections import OrderedDict
except ImportError:
    # No OrderedDict (< 2.7): member maps fall back to plain dicts.
    OrderedDict = None

try:
    basestring
except NameError:
    # In Python 2 basestring is the ancestor of both str and unicode
    # in Python 3 it's just str, but was missing in 3.1
    basestring = str
class _RouteClassAttributeToGetattr(object):
    """Descriptor that hides itself from class-level attribute access.

    Instance access behaves like a read-only property and calls the wrapped
    getter.  Access through the class raises AttributeError, which forwards
    the lookup to the class's __getattr__ -- this lets an enumeration have
    members that are themselves called ``name`` or ``value``.
    """

    def __init__(self, fget=None):
        self.fget = fget

    def __get__(self, instance, ownerclass=None):
        if instance is not None:
            return self.fget(instance)
        # Class-level access: punt to the owner's __getattr__.
        raise AttributeError()

    def __set__(self, instance, value):
        raise AttributeError("can't set attribute")

    def __delete__(self, instance):
        raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
    """Return True if *obj* implements any descriptor protocol method."""
    return any(
        hasattr(obj, attr)
        for attr in ('__get__', '__set__', '__delete__')
    )
def _is_dunder(name):
    """Return True for __dunder__ names such as '__init__'."""
    if len(name) <= 4:
        # Too short to hold anything between the double underscores.
        return False
    if not (name.startswith('__') and name.endswith('__')):
        return False
    # Reject a third leading/trailing underscore, e.g. '___x___'.
    return name[2] != '_' and name[-3] != '_'
def _is_sunder(name):
    """Return True for _sunder_ names such as '_order_'."""
    # Operand order mirrors the original: name[0]/name[-1] are indexed first
    # (so an empty string raises IndexError, exactly as before).
    return (name[0] == '_'
            and name[-1] == '_'
            and not name[1:2] == '_'
            and not name[-2:-1] == '_'
            and len(name) > 2)
def _make_class_unpicklable(cls):
    """Mutate *cls* in place so pickling any of its instances raises TypeError."""
    def _refuse_to_reduce(self, protocol=None):
        raise TypeError('%r cannot be pickled' % self)
    # Overriding __reduce_ex__ defeats every pickle protocol at once, and a
    # bogus __module__ stops pickle from locating the class by name.
    cls.__module__ = '<unknown>'
    cls.__reduce_ex__ = _refuse_to_reduce
class _EnumDict(dict):
    """A dict subclass that records the definition order of enum members.

    EnumMeta reads ``_member_names`` to learn which keys name enumeration
    members, as opposed to dunder attributes and descriptors (methods,
    properties, ...).
    """

    def __init__(self):
        super(_EnumDict, self).__init__()
        # Candidate member names, in assignment order.
        self._member_names = []

    def __setitem__(self, key, value):
        """Store *key*, tracking it as a member name when appropriate.

        Raises ValueError for reserved _sunder_ names and TypeError when a
        member name is reused or collides with an earlier descriptor.  On
        Python 3 a legacy ``__order__`` assignment is silently dropped (it
        is a 2.x-only ordering hint).
        """
        if pyver >= 3.0 and key == '__order__':
            return
        if _is_sunder(key):
            raise ValueError('_names_ are reserved for future Enum use')
        if not _is_dunder(key):
            if key in self._member_names:
                # descriptor overwriting an enum?
                raise TypeError('Attempted to reuse key: %r' % key)
            if not _is_descriptor(value):
                if key in self:
                    # enum overwriting a descriptor?
                    raise TypeError('Key already defined as: %r' % self[key])
                self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course
# until EnumMeta finishes running the first time the Enum class doesn't
# exist.  This is also why there are checks in EnumMeta like
# ``if Enum is not None``.
Enum = None
class EnumMeta(type):
    """Metaclass for Enum.

    Builds the enumeration class: turns the names collected in the class
    body into member instances, wires up member/value maps, and patches
    pickling and comparison behaviour for mixed-in data types.
    """
    @classmethod
    def __prepare__(metacls, cls, bases):
        # Python 3 hook: collect the class body in an order-tracking,
        # name-checking dict.  (Python 2 never calls this.)
        return _EnumDict()
    def __new__(metacls, cls, bases, classdict):
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        # On Python 2 classdict arrives as a plain dict (no __prepare__);
        # replay it through _EnumDict so the same checks apply.
        if type(classdict) is dict:
            original_dict = classdict
            classdict = _EnumDict()
            for k, v in original_dict.items():
                classdict[k] = v
        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                                                         first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = dict((k, classdict[k]) for k in classdict._member_names)
        for name in classdict._member_names:
            del classdict[name]
        # py2 support for definition order
        __order__ = classdict.get('__order__')
        if __order__ is None:
            if pyver < 3.0:
                # No explicit __order__ on py2: fall back to sorting by value.
                __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
            else:
                __order__ = classdict._member_names
        else:
            del classdict['__order__']
            if pyver < 3.0:
                __order__ = __order__.replace(',', ' ').split()
                # Any member not listed in __order__ is appended at the end.
                aliases = [name for name in members if name not in __order__]
                __order__ += aliases
        # check for illegal enum names (any others?)
        invalid_names = set(members) & set(['mro'])
        if invalid_names:
            raise ValueError('Invalid enum member name(s): %s' % (
                ', '.join(invalid_names), ))
        # create our new Enum type
        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []               # names in random order
        if OrderedDict is not None:
            enum_class._member_map_ = OrderedDict()
        else:
            enum_class._member_map_ = {}             # name->value map
        enum_class._member_type_ = member_type
        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}
        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        if __new__ is None:
            __new__ = enum_class.__new__
        for member_name in __order__:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )        # wrap it one more time
            if not use_args or not args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member.value == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass
        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail.  Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly.  We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        unpicklable = False
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                           '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
                    unpicklable = True
        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if name not in classdict and class_method is not enum_method:
                if name == '__reduce_ex__' and unpicklable:
                    continue
                setattr(enum_class, name, enum_method)
        # method resolution and int's are not playing nice
        # Python's less than 2.6 use __cmp__
        if pyver < 2.6:
            if issubclass(enum_class, int):
                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
        elif pyver < 3.0:
            if issubclass(enum_class, int):
                for method in (
                        '__le__',
                        '__lt__',
                        '__gt__',
                        '__ge__',
                        '__eq__',
                        '__ne__',
                        '__hash__',
                        ):
                    setattr(enum_class, method, getattr(int, method))
        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
        return enum_class
    def __call__(cls, value, names=None, module=None, type=None):
        """Either returns an existing member, or creates a new enum class.

        This method is used both when an enum class is given a value to match
        to an enumeration member (i.e. Color(3)) and for the functional API
        (i.e. Color = Enum('Color', names='red green blue')).

        When used for the functional API: `module`, if set, will be stored in
        the new class' __module__ attribute; `type`, if set, will be mixed in
        as the first base class.

        Note: if `module` is not set this routine will attempt to discover the
        calling module by walking the frame stack; if this is unsuccessful
        the resulting class will not be pickleable.
        """
        if names is None:    # simple value lookup
            return cls.__new__(cls, value)
        # otherwise, functional API: we're creating a new Enum type
        return cls._create_(value, names, module=module, type=type)
    def __contains__(cls, member):
        # Membership means "is an instance of this enum and is a known name".
        return isinstance(member, cls) and member.name in cls._member_map_
    def __delattr__(cls, attr):
        # nicer error message when someone tries to delete an attribute
        # (see issue19025).
        if attr in cls._member_map_:
            raise AttributeError(
                "%s: cannot delete Enum member." % cls.__name__)
        super(EnumMeta, cls).__delattr__(attr)
    def __dir__(self):
        return (['__class__', '__doc__', '__members__', '__module__'] +
                self._member_names_)
    @property
    def __members__(cls):
        """Returns a mapping of member name->value.

        This mapping lists all enum members, including aliases. Note that this
        is a copy of the internal mapping.
        """
        return cls._member_map_.copy()
    def __getattr__(cls, name):
        """Return the enum member matching `name`

        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        if _is_dunder(name):
            raise AttributeError(name)
        try:
            return cls._member_map_[name]
        except KeyError:
            raise AttributeError(name)
    def __getitem__(cls, name):
        # Color['red'] -> member lookup by name (KeyError when absent).
        return cls._member_map_[name]
    def __iter__(cls):
        # Iterates canonical members only (aliases are skipped).
        return (cls._member_map_[name] for name in cls._member_names_)
    def __reversed__(cls):
        return (cls._member_map_[name] for name in reversed(cls._member_names_))
    def __len__(cls):
        return len(cls._member_names_)
    def __repr__(cls):
        return "<enum %r>" % cls.__name__
    def __setattr__(cls, name, value):
        """Block attempts to reassign Enum members.

        A simple assignment to the class namespace only changes one of the
        several possible ways to get an Enum member from the Enum class,
        resulting in an inconsistent Enumeration.
        """
        member_map = cls.__dict__.get('_member_map_', {})
        if name in member_map:
            raise AttributeError('Cannot reassign members.')
        super(EnumMeta, cls).__setattr__(name, value)
    def _create_(cls, class_name, names=None, module=None, type=None):
        """Convenience method to create a new Enum class.

        `names` can be:

        * A string containing member names, separated either with spaces or
          commas.  Values are auto-numbered from 1.
        * An iterable of member names.  Values are auto-numbered from 1.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value.
        """
        metacls = cls.__class__
        if type is None:
            bases = (cls, )
        else:
            bases = (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        __order__ = []
        # special processing needed for names?
        if isinstance(names, basestring):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
            names = [(e, i+1) for (i, e) in enumerate(names)]
        # Here, names is either an iterable of (name, value) or a mapping.
        for item in names:
            if isinstance(item, basestring):
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
            __order__.append(member_name)
        # only set __order__ in classdict if name/value was not from a mapping
        if not isinstance(item, basestring):
            classdict['__order__'] = ' '.join(__order__)
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = _sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError):
                pass
        if module is None:
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        return enum_class
    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.

        bases: the tuple of bases that was given to __new__
        """
        if not bases or Enum is None:
            return object, Enum
        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                            "`ClassName([mixin_type,] enum_type)`")
        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]    # first data type
            first_enum = bases[-1]    # enum type
        else:
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base
        return member_type, first_enum
    if pyver < 3.0:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            if __new__:
                return None, True, True      # __new__, save_new, use_args
            N__new__ = getattr(None, '__new__')
            O__new__ = getattr(object, '__new__')
            if Enum is None:
                E__new__ = N__new__
            else:
                E__new__ = Enum.__dict__['__new__']
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    try:
                        target = possible.__dict__[method]
                    except (AttributeError, KeyError):
                        target = getattr(possible, method, None)
                    if target not in [
                            None,
                            N__new__,
                            O__new__,
                            E__new__,
                            ]:
                        if method == '__member_new__':
                            classdict['__new__'] = target
                            return None, False, True
                        if isinstance(target, staticmethod):
                            target = target.__get__(member_type)
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, False, use_args
    else:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            # should __new__ be saved as __member_new__ later?
            save_new = __new__ is not None
            if __new__ is None:
                # check all possibles for __member_new__ before falling back to
                # __new__
                for method in ('__member_new__', '__new__'):
                    for possible in (member_type, first_enum):
                        target = getattr(possible, method, None)
                        if target not in (
                                None,
                                None.__new__,
                                object.__new__,
                                Enum.__new__,
                                ):
                            __new__ = target
                            break
                    if __new__ is not None:
                        break
                else:
                    __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
    # all enum instances are actually created during class construction
    # without calling this method; this method is called by the metaclass'
    # __call__ (i.e. Color(3) ), and by pickle
    if type(value) is cls:
        # For lookups like Color(Color.red)
        value = value.value
        #return value
    # by-value search for a matching enum member
    # see if it's in the reverse mapping (for hashable values)
    try:
        if value in cls._value2member_map_:
            return cls._value2member_map_[value]
    except TypeError:
        # not there, now do long search -- O(n) behavior
        for member in cls._member_map_.values():
            if member.value == value:
                return member
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
    return "<%s.%s: %r>" % (
        self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
    return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
def __dir__(self):
    # Non-underscore names defined on the subclass are "added behavior"
    # and should show up alongside the standard attributes.
    added_behavior = [m for m in self.__class__.__dict__ if m[0] != '_']
    return (['__class__', '__doc__', '__module__', 'name', 'value'] + added_behavior)
temp_enum_dict['__dir__'] = __dir__
del __dir__
def __format__(self, format_spec):
    # mixed-in Enums should use the mixed-in type's __format__, otherwise
    # we can get strange results with the Enum name showing up instead of
    # the value
    # pure Enum branch
    if self._member_type_ is object:
        cls = str
        val = str(self)
    # mix-in branch
    else:
        cls = self._member_type_
        val = self.value
    return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
    def __cmp__(self, other):
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        return NotImplemented
        # NOTE: the raise below is unreachable (both paths above return);
        # kept verbatim from the upstream enum34 source.
        raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__
else:
    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__le__'] = __le__
    del __le__
    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__lt__'] = __lt__
    del __lt__
    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__ge__'] = __ge__
    del __ge__
    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__gt__'] = __gt__
    del __gt__
def __eq__(self, other):
    # Members compare by identity; other types defer via NotImplemented.
    if type(other) is self.__class__:
        return self is other
    return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
    if type(other) is self.__class__:
        return self is not other
    return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__
def __hash__(self):
    return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__
def __reduce_ex__(self, proto):
    # Pickle members by value: unpickling calls Color(value) -> same member.
    return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`.  This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
    return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
    return self._value_
temp_enum_dict['value'] = value
del value
# Build the real Enum class from the method dict via the metaclass; this
# works identically under Python 2 and 3 (no `class` statement needed).
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict

# Enum has now been created
###########################

class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration.

    Any name bound to a member whose canonical name differs is an alias;
    if aliases exist, ValueError is raised listing each alias -> member pair.
    The enumeration is returned unchanged when no aliases are found.
    """
    aliases = [
        (name, member.name)
        for name, member in enumeration.__members__.items()
        if name != member.name
    ]
    if aliases:
        details = ', '.join('%s -> %s' % pair for pair in aliases)
        raise ValueError('duplicate names found in %r: %s' %
                         (enumeration, details))
    return enumeration
| {
"content_hash": "602f9e37159369aef2a04ed0a446f558",
"timestamp": "",
"source": "github",
"line_count": 812,
"max_line_length": 113,
"avg_line_length": 37.60960591133005,
"alnum_prop": 0.5502144798454436,
"repo_name": "ELZo3/symoro",
"id": "17a4fb2b108ebbaecec4e5bf50c241211d22d713",
"size": "30539",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "symoroutils/enum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "521944"
},
{
"name": "Shell",
"bytes": "97"
}
],
"symlink_target": ""
} |
'''
Created on Mar 6, 2016
@author: Laurent Marchelli
'''
import os
import shutil
import Command
from generators.Generator import Generator
from generators.linapple.linappleConfig import LinappleConfig
import recalboxFiles
class LinappleGenerator(Generator):
    '''
    Command line generator for linapple-pie emulator
    Ensure the user's configuration directory has all needed files to run
    linapple emulator and manage configuration file to tune emulator behaviour
    with current hardware configuration.
    Args:
        path_init (str):
            Full path name where default settings are stored.
            ('/recalbox/share_init/system/.linapple')
        path_user (str):
            Full path name where user settings are stored.
            ('/recalbox/share/system/.linapple')
    '''
    def __init__(self, path_init, path_user):
        # NOTE(review): Generator.__init__ is not called here — confirm the
        # base class needs no initialisation of its own.
        self.path_init = path_init
        self.path_user = path_user
        # Files that must be copied from path_init to path_user on first run.
        self.resources = ['Master.dsk']
        # Name of the emulator configuration file (same in both directories).
        self.filename = 'linapple.conf'
    def check_resources(self):
        '''
        Check system needed resources
        Ensures the user directory exists, the system configuration file is
        present, and each resource file has a user-side copy (copying it from
        the system side when missing).
        Returns (bool):
            Returns True if the check succeeded, False otherwise.
        '''
        # Create user setting path, if it does not exists
        if not os.path.exists(self.path_user):
            os.makedirs(self.path_user)
        # Ensure system configuration file is available
        sys_conf = os.path.join(self.path_init, self.filename)
        if not os.path.exists(sys_conf):
            return False
        # Ensure system resources are available
        for r in self.resources:
            sys_filename = os.path.join(self.path_init, r)
            if not os.path.exists(sys_filename):
                return False
            # Copy the resource to the user directory only when absent, so
            # user modifications are preserved across runs.
            usr_filename = os.path.join(self.path_user, r)
            if not os.path.exists(usr_filename):
                shutil.copyfile(sys_filename, usr_filename)
        return True
    def generate(self, system, rom, playersControllers):
        '''
        Configure linapple inputs and return the command line to run.
        Args:
            system (Emulator):
                Emulator object containing a config dictionary with all
                parameters set in EmulationStation.
            rom (str) :
                Path and filename of the rom to run.
            playersControllers (dict):
                Dictionary of controllers connected (1 to 5).
        Returns (configgen.Command, None) :
            Returns Command object containing needed parameter to launch the
            emulator or None if an error occurred.
        '''
        # Check resources
        if not self.check_resources():
            return
        # Load config file
        # Prefer the user's configuration; fall back to the system default.
        usr_conf = os.path.join(self.path_user, self.filename)
        filename = usr_conf \
            if os.path.exists(usr_conf) \
            else os.path.join(self.path_init, self.filename)
        config = LinappleConfig(filename=filename)
        # Adjust configuration
        config.joysticks(playersControllers)
        config.system(system, rom)
        # Save changes
        # Always save to the user directory (even when loaded from system).
        config.save(filename=usr_conf)
        commandArray = [ recalboxFiles.recalboxBins[system.config['emulator']] ]
        if 'args' in system.config and system.config['args'] is not None:
            commandArray.extend(system.config['args'])
        return Command.Command(videomode=system.config['videomode'], array=commandArray)
    def config_upgrade(self, version):
        '''
        Upgrade the user's configuration file with new values added to the
        system configuration file upgraded by S11Share:do_upgrade()
        Args:
            version (str): New Recalbox version
        Returns (bool):
            Returns True if this Generators successfully handled the upgrade.
        '''
        # Check resources
        if not self.check_resources():
            return False
        # Load system configuration file
        config = LinappleConfig(filename=os.path.join(
            self.path_init, self.filename))
        # If an user's configuration file exists, upgrade it
        usr_conf = os.path.join(self.path_user, self.filename)
        if os.path.exists(usr_conf):
            # Merge: user settings win; only keys missing from the user file
            # are taken from the system file.
            config_sys = config
            config = LinappleConfig(filename=usr_conf)
            for k,v in config_sys.settings.items():
                if k not in config.settings:
                    config.settings[k]=v
        # Save config file (original/updated) to user's directory
        config.save(filename=usr_conf)
        print("{} 's configuration successfully upgraded".format(self.__class__.__name__))
        return True
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "9c9cc343518e218907c0485f27e1927f",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 90,
"avg_line_length": 34.105633802816904,
"alnum_prop": 0.6083006400991121,
"repo_name": "digitalLumberjack/recalbox-configgen",
"id": "15c5320f3ba001dddec8f93ef20a4e78e8445f7f",
"size": "4843",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "configgen/generators/linapple/linappleGenerator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131403"
}
],
"symlink_target": ""
} |
def checkCurNum(listNumbers, numToMatch):
    """Return True if any two positionally distinct numbers in listNumbers
    sum to numToMatch, else False.

    Replaces the original recursive O(n^2) pairwise scan with a single
    O(n) pass using a set of previously seen values; the dead `matchFound`
    local is gone.  A value paired with an equal value (e.g. 2 + 2 == 4)
    still requires two occurrences, as before.
    """
    if len(listNumbers) < 2:
        return False
    seen = set()
    for num in listNumbers:
        # If the complement was seen earlier, a valid pair exists.
        if numToMatch - num in seen:
            return True
        seen.add(num)
    return False
filename = "inputs\\2020\\input-day9.txt"
with open(filename) as f:
    lines = f.readlines()
# Convert list of strings to list of ints.
listNumbers = list(map(int, lines))
windowSize = 25
invalidNumber = 0
# Part 1: a number is valid when two of the *preceding* windowSize numbers
# sum to it.  Bug fix: the original slice ended at curIndex+1, which wrongly
# included the current number in its own window (a false positive whenever
# the window contains a 0).
for curIndex in range(windowSize, len(listNumbers)):
    window = listNumbers[curIndex - windowSize:curIndex]
    matchFound = checkCurNum(window, listNumbers[curIndex])
    if not matchFound:
        print("Number " + str(listNumbers[curIndex]) + " is invalid")
        # NOTE(review): no break here, so with several invalid numbers the
        # *last* one is kept — preserved from the original behaviour.
        invalidNumber = listNumbers[curIndex]
startIndex = 0
endIndex = 0
# Part 2: find a contiguous range (length >= 2) that adds to the invalid
# number.  `total` replaces the original name `sum`, which shadowed the
# builtin.  The early break on total > invalidNumber assumes all inputs
# are positive, as in the puzzle data.
for curIndex in range(len(listNumbers)):
    total = listNumbers[curIndex]
    sumFound = False
    for secondIndex in range(curIndex + 1, len(listNumbers)):
        total += listNumbers[secondIndex]
        if total == invalidNumber:
            startIndex = curIndex
            endIndex = secondIndex
            sumFound = True
            break
        elif total > invalidNumber:
            break
    if sumFound:
        break
print("Match list: " + str(listNumbers[startIndex:endIndex+1]))
# The "weakness" is the sum of the smallest and largest values in the run.
minNum = min(listNumbers[startIndex:endIndex+1])
maxNum = max(listNumbers[startIndex:endIndex+1])
print("Weakness: " + str(minNum + maxNum))
| {
"content_hash": "c352b18285ec6264b12e175341e59b86",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 95,
"avg_line_length": 32.36538461538461,
"alnum_prop": 0.6749851455733809,
"repo_name": "caw13/adventofcode",
"id": "b081f2fd2818a9aa7c7664ee8617516e70d47491",
"size": "1766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/2020/day_nine_part1-2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41798"
},
{
"name": "Ruby",
"bytes": "1762"
}
],
"symlink_target": ""
} |
import json
import pkg_resources
def get_pkg_license(pkgname):
    """
    Given a package reference (as from requirements.txt),
    return license listed in package metadata.
    NOTE: This function does no error checking and is for
    demonstration purposes only.
    """
    dist = pkg_resources.require(pkgname)[0]
    try:
        for line in dist.get_metadata_lines('PKG-INFO'):
            # Metadata lines look like "Key: value"; a malformed line raises
            # ValueError, which aborts the whole lookup below.
            key, val = line.split(': ', 1)
            if key == "License":
                return val
    except Exception:
        return None
    return None
def clean_license_string(lic):
    """Normalize a raw license string.

    Drops the words 'license'/'version' and any commas (case-insensitively),
    collapses runs of whitespace, and returns the result upper-cased.
    """
    cleaned = lic.lower()
    for noise in ("license", "version", ","):
        cleaned = cleaned.replace(noise, "")
    # join/split collapses internal whitespace and strips the ends in one go.
    return ' '.join(cleaned.split()).upper()
def main():
    """Group every installed distribution by its normalized license string
    and print the mapping as indented JSON on stdout."""
    licenses = {}
    for pkg in pkg_resources.working_set:
        lic = get_pkg_license(pkg.project_name)
        if lic:
            lic = clean_license_string(lic)
            # setdefault replaces the original get()/append/reassign dance
            # with a single, equivalent accumulation step.
            licenses.setdefault(str(lic), []).append(pkg.project_name)
    # print() call form works on both Python 2 and 3; the original used the
    # Python-2-only print statement.
    print(json.dumps(licenses, indent=2))
if __name__ == '__main__':
main()
| {
"content_hash": "9bd93c386f6c8f68649bbe07f24e1b84",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 57,
"avg_line_length": 22.448275862068964,
"alnum_prop": 0.5668202764976958,
"repo_name": "cpenner461/tellmewhen",
"id": "6e8fb807d9b1c6eb762c573241ff619085a3cf2b",
"size": "1349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "licenses.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3855"
},
{
"name": "HTML",
"bytes": "36756"
},
{
"name": "JavaScript",
"bytes": "1248"
},
{
"name": "Python",
"bytes": "22441"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django import forms
class SyncDataForm(forms.Form):
    """
    Form for running sync_data management command.

    Intentionally declares no fields.
    """
    pass
| {
"content_hash": "c57a368e1cd5bd4a4abef4c844f696ba",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 56,
"avg_line_length": 17.636363636363637,
"alnum_prop": 0.6958762886597938,
"repo_name": "pythonindia/junction",
"id": "fda778c2ae5e70e2c0c0754d2130b3c99bb09957",
"size": "218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "junction/tickets/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190844"
},
{
"name": "HTML",
"bytes": "161794"
},
{
"name": "JavaScript",
"bytes": "49000"
},
{
"name": "Python",
"bytes": "379163"
},
{
"name": "Shell",
"bytes": "595"
}
],
"symlink_target": ""
} |
"""Add RepositoryOption
Revision ID: 152c9c780e
Revises: 4d302aa44bc8
Create Date: 2013-11-26 17:48:21.180630
"""
# revision identifiers, used by Alembic.
revision = '152c9c780e'
down_revision = '4d302aa44bc8'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the `repositoryoption` table (one named option value per
    repository, unique per (repository_id, name))."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('repositoryoption',
    sa.Column('id', sa.GUID(), nullable=False),
    sa.Column('repository_id', sa.GUID(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=False),
    sa.Column('value', sa.Text(), nullable=False),
    sa.Column('date_created', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('repository_id','name', name='unq_repositoryoption_name')
    )
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the `repositoryoption` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('repositoryoption')
    ### end Alembic commands ###
| {
"content_hash": "65efce0dc9c5754d98dc4833270d577c",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 81,
"avg_line_length": 29.771428571428572,
"alnum_prop": 0.6842610364683301,
"repo_name": "alex/changes",
"id": "31969f2825609ee05abdaf0ca183d8e8c29a57f2",
"size": "1042",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "migrations/versions/152c9c780e_add_repositoryoption.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""The scope builder interface."""
from __future__ import absolute_import
from . import ty as _ty
from . import expr as _expr
from .._ffi import base as _base
class WithScope(object):
    """Context-manager wrapper for builder methods that introduce scoping.

    On clean exit the registered callback runs; if the managed block raised,
    the exception is re-raised and the callback is skipped.

    Parameters
    ----------
    enter_value: object
        The value returned by enter.
    """
    def __init__(self, enter_value, exit_cb):
        self._enter_value = enter_value
        self._exit_cb = exit_cb
    def __enter__(self):
        return self._enter_value
    def __exit__(self, ptype, value, trace):
        # Success path: fire the exit callback and finish.
        if not value:
            self._exit_cb()
            return
        # Failure path: propagate without running the callback.
        raise value
def _make_lets(bindings, ret_value):
"""Make a nested let expressions.
Parameters
----------
bindings: List[Tuple[tvm.relay.Var,tvm.relay.Expr]]
The sequence of let bindings
ret_value: tvm.relay.Expr
The final value of the expression.
Returns
-------
lets: tvm.relay.Expr
A nested let expression.
"""
if ret_value is None:
raise RuntimeError("ret is not called in this scope")
if isinstance(ret_value, _expr.If) and ret_value.false_branch is None:
raise RuntimeError("Creating an If expression without else.")
let_expr = ret_value
for var, value in reversed(bindings):
let_expr = _expr.Let(var, value, let_expr)
return let_expr
class ScopeBuilder(object):
    """Scope builder class.
    Enables users to build up a nested
    scope(let, if) expression easily.
    Examples
    --------
    .. code-block: python
        sb = relay.ScopeBuilder()
        cond = relay.var("cond", 'bool')
        x = relay.var("x")
        y = relay.var("y")
        with sb.if_scope(cond):
            one = relay.const(1, "float32")
            t1 = sb.let("t1", relay.add(x, one))
            sb.ret(t1)
        with sb.else_scope():
            sb.ret(y)
        print(sb.get().astext())
    """
    def __init__(self):
        # Parallel stacks with one entry per open scope; index -1 is always
        # the innermost scope: a list of (var, value) bindings and the
        # scope's return value (None until ret() is called).
        self._bindings = [[]]
        self._ret_values = [None]
    def _enter_scope(self):
        # Push a fresh scope frame (empty bindings, unset return value).
        self._bindings.append([])
        self._ret_values.append(None)
    def _exit_scope(self):
        # Pop and return the innermost scope frame.
        bindings = self._bindings.pop()
        ret_value = self._ret_values.pop()
        return bindings, ret_value
    def let(self, var, value):
        """Create a new let binding.
        Parameters
        ----------
        var: Union[Tuple[str, relay.Type], tvm.relay.Var]
            The variable or name of variable.
        value: tvm.relay.Expr
            The value to be bound
        """
        # Normalize (name, type) tuples and bare names into relay Vars.
        if isinstance(var, (tuple, list)):
            if len(var) > 2:
                raise ValueError("Expect var to be Tuple[str, relay.Type]")
            var = _expr.var(*var)
        elif isinstance(var, _base.string_types):
            var = _expr.var(var)
        self._bindings[-1].append((var, value))
        return var
    def if_scope(self, cond):
        """Create a new if scope.
        Parameters
        ----------
        cond: tvm.relay.expr.Expr
            The condition
        Returns
        -------
        scope: WithScope
            The if scope.
        Note
        ----
        The user must follow with an else scope.
        """
        self._enter_scope()
        def _on_exit():
            bindings, ret_value = self._exit_scope()
            if self._ret_values[-1] is not None:
                raise RuntimeError("result already returned before if scope")
            true_branch = _make_lets(bindings, ret_value)
            # Record a *partial* If (false_branch=None); else_scope() is
            # expected to fill it in.
            self._ret_values[-1] = _expr.If(cond, true_branch, None)
        return WithScope(None, _on_exit)
    def else_scope(self):
        """Create a new else scope.
        Returns
        -------
        scope: WithScope
            The else scope.
        """
        self._enter_scope()
        def _on_exit():
            bindings, ret_value = self._exit_scope()
            partial_if = self._ret_values[-1]
            # Only valid right after an if_scope that still lacks its else.
            no_else = not isinstance(partial_if, _expr.If) or partial_if.false_branch is not None
            if no_else:
                raise RuntimeError("else scope must follows")
            false_branch = _make_lets(bindings, ret_value)
            # Replace the partial If with the completed one.
            self._ret_values[-1] = _expr.If(partial_if.cond, partial_if.true_branch, false_branch)
        return WithScope(None, _on_exit)
    def type_of(self, expr):
        """
        Compute the type of an expression.
        Parameters
        ----------
        expr: relay.Expr
            The expression to compute the type of.
        """
        if isinstance(expr, _expr.Var):
            return expr.type_annotation
        # Bind the expression to a fresh var with an incomplete type; the
        # type inferencer later unifies `ity` with the expression's type.
        ity = _ty.IncompleteType()
        var = _expr.var("unify", ity)
        self.let(var, expr)
        return ity
    def ret(self, value):
        """Set the return value of this scope.
        Parameters
        ----------
        value: tvm.relay.expr.Expr
            The return value.
        """
        if self._ret_values[-1] is not None:
            raise RuntimeError("ret value is already set in this scope.")
        self._ret_values[-1] = value
    def get(self):
        """Get the generated result.
        Returns
        -------
        value: tvm.relay.expr.Expr
            The final result of the expression.
        """
        if len(self._bindings) != 1:
            raise RuntimeError("can only call get at the outmost scope")
        return _make_lets(self._bindings[-1], self._ret_values[-1])
| {
"content_hash": "bf266509078719535d9962358f31998c",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 98,
"avg_line_length": 26.842364532019705,
"alnum_prop": 0.5412002202238942,
"repo_name": "Laurawly/tvm-1",
"id": "726b3c6241fec9442e04e434dad978c114cf7752",
"size": "6235",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/tvm/relay/scope_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4093"
},
{
"name": "C",
"bytes": "351611"
},
{
"name": "C++",
"bytes": "11660999"
},
{
"name": "CMake",
"bytes": "228510"
},
{
"name": "Cuda",
"bytes": "16902"
},
{
"name": "Cython",
"bytes": "28979"
},
{
"name": "Go",
"bytes": "111527"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "199950"
},
{
"name": "JavaScript",
"bytes": "15305"
},
{
"name": "Makefile",
"bytes": "67149"
},
{
"name": "Objective-C",
"bytes": "24259"
},
{
"name": "Objective-C++",
"bytes": "87655"
},
{
"name": "Python",
"bytes": "16256580"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "228674"
},
{
"name": "TypeScript",
"bytes": "94385"
}
],
"symlink_target": ""
} |
import os
import xml.dom.minidom as dom
from .sound import Record, DataBase
def check_year(year):
    """Validate an ACORNS corpus year.

    Raises:
        ValueError: if `year` is not 1 or 2.
    """
    if year not in [1, 2]:
        # Bug fix: the original `raise(ValueError, "...")` raises a *tuple*,
        # which is a TypeError under Python 3 and never produced the intended
        # ValueError.
        raise ValueError("Wrong year version: %d (should be 1 or 2)" % year)
def _filter_XML(path):
return (os.path.isfile(path)
and os.path.splitext(path)[-1].lower() == '.xml')
def _get_XML_files_in(dirname):
    """Returns list of xml files in given directory
    (detection is extension based).
    """
    candidates = (os.path.join(dirname, entry)
                  for entry in os.listdir(dirname))
    return filter(_filter_XML, candidates)
def _get_tag_node_name(year):
return 'vistag' + ('' if year == 1 else 's')
class AcornsDB(DataBase):
    # Layout constants of an ACORNS corpus tree.
    XML_DIR = 'XML'
    WAV_DIR = 'WAV'
    WAV_EXT = '.wav'
    def __init__(self):
        DataBase.__init__(self)
    def from_ACORNS_root(self, root, year=1):
        """
        Populate the database from an ACORNS corpus root directory.
        @param root: corpus root directory.
        @param year: Acorns version, 1 (default) for year 1, 2 for year 2.
        """
        check_year(year)
        root = os.path.abspath(root)
        tag_node_name = _get_tag_node_name(year)
        self.root = root
        for s in self.get_speakers(year):
            spk_id = self.add_speaker(s)
            # Parse files and populate records
            spk_root = os.path.join(self.root, self.XML_DIR, s)
            for xml_file in _get_XML_files_in(spk_root):
                rec = self._parse_record(spk_id, s, xml_file, tag_node_name)
                self.add_record(rec)
        self.sort()
    def _parse_record(self, speaker_id, speaker, xml_file, tag_node_name):
        # Parse one utterance XML file into a Record (audio file name,
        # visual tags and transcription).
        parsed = dom.parse(os.path.join(self.root, self.XML_DIR, speaker,
                                        xml_file))
        # Ignore other than first utterance
        utt = parsed.getElementsByTagName('utterance')[0]
        style = utt.getElementsByTagName('style')[0].getAttribute('value')
        # Audio attribute lacks the extension; append the WAV suffix.
        audio = utt.getElementsByTagName(
            'audio-file'
        )[0].getAttribute('value') + self.WAV_EXT
        # Whitespace-separated tag names inside the vistag(s) node.
        tag_names = utt.getElementsByTagName(
            tag_node_name
        )[0].childNodes[0].data
        tags = [self.get_tag_add(tn)
                for tn in tag_names.split()]
        # NOTE(review): childNodes[2] assumes a fixed node layout inside
        # <trans> — confirm against the corpus XML schema.
        trans = utt.getElementsByTagName(
            'trans'
        )[0].childNodes[2].data.strip()
        return Record(self, speaker_id, audio, tags, trans, style)
    @classmethod
    def n_speakers(cls, year):
        # Year 1 of the corpus has 4 speakers, year 2 has 10.
        return 4 if year == 1 else 10
    @classmethod
    def get_speakers(cls, year):
        # Speaker directory names: 'Speaker-01' .. 'Speaker-NN'.
        n_speakers = cls.n_speakers(year)
        return ['Speaker-%.2d' % i for i in range(1, 1 + n_speakers)]
| {
"content_hash": "88fadde78483a53f2c8b29dbc9029d4e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 31.864197530864196,
"alnum_prop": 0.5749709414955444,
"repo_name": "omangin/multimodal",
"id": "c9aa4e901926092f034143b3be9db6ec67785681",
"size": "2581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multimodal/db/models/acorns.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "286366"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import datetime
import json
from time import time
import types
import unittest
import normalize.exc as exc
from normalize.coll import list_of
from normalize.record import Record
from normalize.visitor import VisitorPattern
from testclasses import acent
from testclasses import acent_attributes
from testclasses import JsonStarList
from testclasses import maia
from testclasses import NamedStarList
from testclasses import PullRequest
from testclasses import StarList
from testclasses import StarSystem
from testclasses import Wall
from testclasses import wall_one
JSON_CAN_DUMP = (basestring, int, float, long, dict, list, types.NoneType)
class SimpleDumper(VisitorPattern):
    """Visitor that reduces record field values to JSON-serializable ones."""
    @classmethod
    def apply(self, value, *args):
        # Anything json.dumps already understands passes through untouched.
        if isinstance(value, JSON_CAN_DUMP):
            return value
        # Datetimes are rendered as ISO-8601 strings.
        if isinstance(value, datetime):
            return value.isoformat()
        raise Exception("Can't dump %r" % value)
class AssertDiffTest(unittest.TestCase):
def assertDiffs(self, a, b, expected, **kwargs):
differences = set(str(x) for x in a.diff(b, **kwargs))
self.assertEqual(
differences,
set("<DiffInfo: %s>" % x for x in expected)
)
class TestVisitor(AssertDiffTest):
    # Exercises SimpleDumper round-tripping (visit -> plain data -> cast)
    # against the shared testclasses fixtures.
    def setUp(self):
        # Plain-dict equivalent of the `acent` StarSystem fixture.
        self.acent_json_data = {
            'name': 'Alpha Centauri',
            'components': [{'hip_id': 71683, 'name': 'Alpha Centauri A'},
                           {'hip_id': 71681, 'name': 'Alpha Centauri B'},
                           {'hip_id': 70890, 'name': 'Alpha Centauri C'}],
            'attributes': acent_attributes,
        }
        # Same components wrapped as a named star list payload.
        self.nsl_json_data = {
            'name': 'Alpha Centauri',
            'values': self.acent_json_data['components']
        }
    def test_simple_dumper(self):
        # visit() output must be JSON-dumpable and cast() must round-trip it.
        dumpable = SimpleDumper.visit(wall_one)
        self.assertIsInstance(dumpable['posts'][0], dict)
        self.assertEqual(dumpable['posts'][0]['edited'], "2001-09-09T01:46:40")
        json.dumps(dumpable)  # assert doesn't throw
        wall_roundtripped = SimpleDumper.cast(Wall, dumpable)
        self.assertDiffs(wall_one, wall_roundtripped, {})
        self.assertDiffs(wall_one, Wall(dumpable), {})
    def test_intro_example_dump(self):
        # Dumping the fixture yields exactly the plain-dict equivalent.
        dumped = SimpleDumper.visit(acent)
        self.assertEqual(dumped, self.acent_json_data)
    def test_intro_example_cast(self):
        # Constructing directly and casting give equivalent records.
        self.assertDiffs(acent, StarSystem(self.acent_json_data), {})
        self.assertDiffs(
            acent, SimpleDumper.cast(StarSystem, self.acent_json_data),
            {},
        )
    def test_complex_dump(self):
        # A collection with an extra attribute dumps to the wrapped payload.
        nsl = NamedStarList(acent.components)
        nsl.name = "Alpha Centauri"
        dumped = SimpleDumper.visit(nsl)
        self.assertEqual(dumped, self.nsl_json_data)
    def test_complex_dump2(self):
        # Round-trip preserves nested dict-typed properties.
        dumped = SimpleDumper.visit(maia)
        maia2 = SimpleDumper.cast(type(maia), dumped)
        self.assertEqual(maia.diff(maia2), [])
        self.assertEqual(maia2.coordinates['ICRS'][2], "49.60656")
        self.assertEqual(maia2.designations['HR'], "1149")
    def test_complex_cast(self):
        # kwargs construction matches cast() of the same payload.
        nsl = NamedStarList(**(self.nsl_json_data))
        self.assertDiffs(
            nsl, SimpleDumper.cast(NamedStarList, self.nsl_json_data),
            {},
        )
    def test_dump_types(self):
        # reflect() exposes item and property type metadata.
        typeinfo = SimpleDumper.reflect(NamedStarList)
        self.assertEqual(
            typeinfo['itemtype']['properties']['hip_id']['type'],
            'int',
        )
        typeinfo = SimpleDumper.reflect(Wall)
        self.assertEqual(typeinfo['properties']['owner']['name'], 'Person')
        self.assertEqual(
            typeinfo['properties']['owner']['properties']['interests']['type'],
            'list',
        )
    def test_json_dump(self):
        # A JSON-flavored collection dumps identically to the plain one.
        plain_list = StarList(self.acent_json_data['components'])
        json_list = JsonStarList(self.acent_json_data['components'])
        plain_dumped = SimpleDumper.visit(plain_list)
        json_dumped = SimpleDumper.visit(json_list)
        self.assertEqual(plain_dumped, json_dumped)
    def test_cast_garbage(self):
        # Non-record-shaped inputs must raise the visitor's grok error.
        for garbage in (
            "green cheese", [], (),
            {'values': {"foo": "bar"}},
            self.acent_json_data['components'],
        ):
            with self.assertRaises(exc.VisitorGrokRecordError):
                SimpleDumper.cast(NamedStarList, garbage)
    def test_cast_complex_filtered(self):
        # this works because the properties are filtered out; normally this
        # filtering would be due to 'extraneous' property settings.
        # MultiFieldSelector doesn't currently distinguish between 'None' =>
        # all items in collection vs 'None' => all, so use a filter which
        # mentions each of the items in the set.
        nsl = SimpleDumper.cast(
            NamedStarList,
            self.acent_json_data['components'],
            visit_filter=tuple([x, 'hip_id'] for x in range(0, 3)),
        )
        self.assertEqual(len(nsl), 3)
    def test_visit_complex_filtered(self):
        # A field filter restricts the dumped dicts to the selected keys.
        nsl = NamedStarList(**(self.nsl_json_data))
        visited = SimpleDumper.visit(
            nsl, filter=tuple([x, 'hip_id'] for x in range(0, 3)),
        )
        self.assertEqual(
            visited, list(
                {'hip_id': x['hip_id']} for x in
                self.acent_json_data['components']
            ),
        )
class TestTypeUnionCases(AssertDiffTest):
    # Covers properties whose type is a union (e.g. datetime-or-None).
    def setUp(self):
        # One PR with merged_at unset, one with a real timestamp.
        self.open_pr = PullRequest(number=123, merged_at=None)
        self.closed_pr = PullRequest(
            number=456,
            merged_at=datetime.fromtimestamp(time() - 20 * 86400),
        )
    def test_type_union_dump(self):
        # With ignore_none=False the None-valued union member is emitted.
        dumped = SimpleDumper.visit(self.open_pr, ignore_none=False)
        self.assertIn("created_at", dumped)
        self.assertRegexpMatches(
            dumped['created_at'], r'^\d{4}-\d{2}-\d{2}T.*',
        )
        self.assertEqual(dumped['merged_at'], None)
        dumped = SimpleDumper.visit(self.closed_pr)
        self.assertRegexpMatches(
            dumped['created_at'], r'^\d{4}-\d{2}-\d{2}T.*',
        )
        self.assertIn("created_at", dumped)
        self.assertIn('merged_at', dumped)
    def test_type_union_load(self):
        # Casting a dict with an explicit None union member round-trips.
        pr_dict = {
            "number": "5125",
            "created_at": "2014-07-23T12:34:56Z",
            "merged_at": None,
        }
        my_pr = PullRequest(pr_dict)
        pr_2 = SimpleDumper.cast(PullRequest, pr_dict, ignore_none=False)
        self.assertDiffs(my_pr, pr_2, {})
    def test_type_union_typeinfo(self):
        # reflect() reports the union as a list of type names.
        schema = SimpleDumper.reflect(PullRequest)
        self.assertEqual(schema['properties']['merged_at']['type'],
                         ["datetime", "NoneType"])
    def test_cast_collection(self):
        # Casting a list of dicts yields a typed collection of records,
        # including the empty-list case.
        RecordList = list_of(Record)
        casted = VisitorPattern.cast(RecordList, [{}, {}])
        self.assertIsInstance(casted[0], Record)
        self.assertIsInstance(casted, RecordList)
        empty_casted = VisitorPattern.cast(RecordList, [])
        self.assertIsInstance(empty_casted, RecordList)
| {
"content_hash": "4a9cc3e2d05db70bbcdc511dd3c8dab4",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 79,
"avg_line_length": 35.34313725490196,
"alnum_prop": 0.6069348127600555,
"repo_name": "samv/normalize",
"id": "5bdb8068bf6d2850310d6bbac71391dd487b0a77",
"size": "7772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_visitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "362038"
}
],
"symlink_target": ""
} |
from selenium import webdriver
import argparse
import os
import os.path
import sys
from datetime import datetime
import pandas as pd
import json
from pprint import pprint
import glob
import linecache
import logging
def get_driver(driver_option=None):
    """Build a Selenium webdriver from a driver executable path.

    Args:
        driver_option (str, optional): path to the driver binary; the driver
            kind is inferred from the path ('gecko' -> Firefox, 'chrome' ->
            Chrome, anything else -> PhantomJS).  Defaults to the Windows
            chromedriver location.

    Returns:
        selenium.webdriver.*: the constructed driver.

    Raises:
        IOError: when a chrome driver path does not exist.
    """
    driver = None
    if not driver_option:
        # Bug fix: use a raw string — the original non-raw literal contained
        # unrecognized escape sequences ("\P", "\G", ...), which is deprecated
        # behaviour in Python 3.
        driver_option = r"C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe"
    if 'gecko' in driver_option:
        # Disable styles/images/flash and cap script run time for faster,
        # better-bounded page loads.
        firefoxProfile = webdriver.FirefoxProfile()
        firefoxProfile.set_preference('permissions.default.stylesheet', 2)
        firefoxProfile.set_preference('permissions.default.image', 2)
        firefoxProfile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
        firefoxProfile.set_preference("http.response.timeout", 10)
        firefoxProfile.set_preference("dom.max_script_run_time", 10)
        # add profile for better timeout control.
        driver = webdriver.Firefox(executable_path=driver_option,
                                   firefox_profile=firefoxProfile)
    elif 'chrome' in driver_option:
        if not os.path.exists(driver_option):
            raise IOError("Can't find chrome webdriver:{}".format(driver_option))
        os.environ['webdriver.chrome.driver'] = driver_option
        driver = webdriver.Chrome(driver_option)
    else:
        driver = webdriver.PhantomJS(executable_path=driver_option)
    return driver
def convert_file(inputfile, outputfile):
    '''
    Re-encode a text file from latin-1 to utf-8.
    :param inputfile: path of the latin-1 encoded source file.
    :param outputfile: path of the utf-8 encoded destination file.
    :return: None
    '''
    # Both files are opened together (reader first), matching the original
    # open ordering.
    with open(inputfile, 'r', encoding='latin_1') as source, \
            open(outputfile, 'w', encoding='utf-8') as destination:
        destination.write(source.read())
def get_or_create_dataframe(existingfile, columnslist, dtypedict=None):
    """Load a CSV into a DataFrame if it exists; otherwise return an empty
    DataFrame with the given columns.

    :param existingfile: CSV path to try loading.
    :param columnslist: column names for the empty fallback frame.
    :param dtypedict: optional dtype mapping passed to read_csv.
    """
    missing = not (os.path.exists(existingfile) and os.path.isfile(existingfile))
    if missing:
        return pd.DataFrame(columns=columnslist)
    return pd.read_csv(existingfile, dtype=dtypedict)
def get_default_cols_list():
    """Column headers used by the company-list CSV files."""
    return [
        "Symbol", "Name", "LastSale", "MarketCap", "IPOyear",
        "Sector", "industry", "Summary Quote", "market",
    ]
def get_basedata_path():
    """Absolute path of the project's 'basedata' directory."""
    return get_subfolder_path('basedata')
def get_exportdata_path():
    """Absolute path of the project's 'exportdata' directory."""
    return get_subfolder_path('exportdata')
def get_subfolder_path(folder_name):
    """Resolve ../<folder_name>/ relative to this module's directory.

    :raises IOError: when the folder does not exist.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    candidate = os.path.join(here, "../{}/".format(folder_name))
    if os.path.exists(candidate) and os.path.isdir(candidate):
        return candidate
    raise IOError("can't find {} folder!".format(folder_name))
def get_keyword_file(company_listfile):
    """Path of keywords.csv located next to the given company-list file."""
    base_dir = os.path.dirname(company_listfile)
    return os.path.join(base_dir, "keywords.csv")
def get_company_list_file(market):
    """Return the company-list CSV path configured for `market` in
    basedata/marketlist.json, falling back to the bundled minimal list when
    the JSON configuration file is absent.

    Raises:
        ValueError: when the market is not configured or the listed file
            does not exist.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    json_file = os.path.join(dir_path, "../basedata/", "marketlist.json")
    if not (os.path.exists(json_file) and os.path.isfile(json_file)):
        return get_minimal_company_list_file(market)
    with open(json_file, 'r') as json_source:
        company_list = json.load(json_source)
    try:
        list_file = company_list[market]['ListFile']
    except (KeyError, TypeError):
        # Bug fix: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        pprint(company_list)
        raise ValueError("Can't find proper market in configuration file.")
    # Relative paths in the config are taken relative to basedata/.
    if not os.path.isabs(list_file):
        list_file = os.path.join(dir_path, '../basedata/', list_file)
    if not (os.path.exists(list_file) and os.path.isfile(list_file)):
        raise ValueError("Can't find company list file:{}".format(list_file))
    print(list_file)  # NOTE(review): leftover debug output — consider logging.
    return list_file
def get_minimal_company_list_file(market):
    """Return the bundled companylist CSV path for one of NASDAQ/NYSE/AMEX.

    :raises ValueError: for unknown markets or when the CSV file is missing.
    """
    markets = ['NASDAQ', 'NYSE', 'AMEX']
    if market not in markets:
        raise ValueError("market should be one of:{}".format(markets))
    # All bundled files follow the same naming pattern.
    csv_name = "companylist-{}.csv".format(market)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    companylist_file = os.path.join(dir_path, "../basedata/", csv_name)
    if not (os.path.exists(companylist_file) and os.path.isfile(companylist_file)):
        raise ValueError("Can't find company list file:{}".format(companylist_file))
    return companylist_file
def PrintException():
    """Print the file, line number, source line and exception object of the
    exception currently being handled."""
    exc_type, exc_obj, tb = sys.exc_info()
    frame = tb.tb_frame
    lineno = tb.tb_lineno
    fname = frame.f_code.co_filename
    # Refresh the cache in case the source changed since it was last read.
    linecache.checkcache(fname)
    src_line = linecache.getline(fname, lineno, frame.f_globals)
    print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(fname, lineno, src_line.strip(), exc_obj))
def get_logger_level(loglevel):
    """Map a level name ('DEBUG', 'INFO', ...) to its logging constant.

    Unknown names fall back to logging.INFO.
    """
    name_to_level = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
    }
    return name_to_level.get(loglevel, logging.INFO)
def pricejohn_commandline_parser():
    '''
    Parse command line options for the price-fetching tool.

    Returns:
        dict: option name -> parsed value; 'piece' is normalized to None
        when -piece is -1 (meaning "all pieces").

    Raises:
        IndexError: when -total or -piece is out of range.
    '''
    parser = argparse.ArgumentParser(description="John's ticker description tool command line parser")
    parser.add_argument('-driver', action="store", dest="driver", required=False,
                        default='/Users/desheng/builds/phantomjs/bin/phantomjs',
                        help="path to gecko driver or phantomjs")
    parser.add_argument('-source', action="store", dest="source", required=False,
                        default="ticker_all_verified_final.csv",
                        help="file which includes all verified ticker, relative to basedata folder.")
    parser.add_argument('-symbol', action="store", dest="symbol", required=False,
                        default="symbol",
                        help="symbol field column header")
    parser.add_argument('-market', action="store", dest="market", required=False,
                        default="market",
                        help="market field column header.")
    parser.add_argument('-target', action="store", dest="target", required=False,
                        default="yahoo",
                        help="indicate yahoo or google, by default, it will be yahoo.")
    parser.add_argument('-total', action="store", dest="total", required=False, default=5, type=int,
                        help='total pieces to split.')
    parser.add_argument('-piece', action="store", dest="piece", required=False, default=-1, type=int,
                        help='value should be between 1 and total, -1 means all.')
    parser.add_argument('-dropwait', action="store", dest="dropwait", required=False, default=20, type=int,
                        help='time to wait for dropdown list')
    parser.add_argument('-downloadwait', action="store", dest="downloadwait", required=False, default=5, type=int,
                        help='time to wait for download href link.')
    parser.add_argument('-bypass', action="store_true", dest="bypass", required=False,
                        default=False,
                        help="if desc exists, then bypass it")  # control broken download
    parser.add_argument('-startdate', action="store", dest="startdate", required=False,
                        default="20000101",
                        help="start date to extract stock from google.")
    # Bug fix: the default was the copy-pasted date "20000101".  Unknown
    # names fall back to INFO in get_logger_level(), so "INFO" is the
    # behavior-equivalent, self-documenting default.
    parser.add_argument('-loglevel', action="store", dest="loglevel", required=False,
                        default="INFO",
                        help="logger level: INFO, DEBUG etc")
    parser.add_argument('-entry_mode', action="store", dest="entry_mode", required=False,
                        default="shortpath",  # fullpath, shortpath
                        help="directly access to history price link or start from google finance")
    args = parser.parse_args()
    return_options = {}
    return_options["driver"] = args.driver
    return_options['source'] = args.source
    return_options['symbol'] = args.symbol
    return_options['market'] = args.market
    return_options['target'] = args.target
    return_options['dropwait'] = args.dropwait
    return_options['downloadwait'] = args.downloadwait
    return_options['bypass'] = args.bypass
    return_options['startdate'] = args.startdate
    return_options['loglevel'] = args.loglevel
    return_options['entry_mode'] = args.entry_mode
    # Valid totals are 2..99 inclusive.  Bug fix: the original message
    # claimed "bigger than 2", contradicting the `1 < total < 100` check.
    if not (1 < args.total < 100):
        raise IndexError("Total should be between 2 and 99, but now:{}".format(args.total))
    if not (args.piece == -1 or 0 < args.piece <= args.total):
        raise IndexError(
            "Piece parameter should be either -1 (all) or between 1 and total, but now:{}".format(args.piece))
    return_options['total'] = args.total
    # -1 is normalized to None, meaning "process all pieces".
    return_options['piece'] = None if args.piece == -1 else args.piece
    return return_options
def descjohn_commandline_parser():
    '''
    Parse command line options for descjohn.py.

    :return: dict mapping option names (driver, source, symbol, market,
             bypass, total, piece, downloadwait) to their parsed values.
    '''
    parser = argparse.ArgumentParser(description="John's ticker description tool command line parser")
    parser.add_argument('-driver', action="store", dest="driver", required=False,
                        default='/Users/desheng/builds/phantomjs/bin/phantomjs',
                        help="path to gecko driver or phantomjs")
    parser.add_argument('-source', action="store", dest="source", required=False,
                        default="ticker_all_verified_final.csv",
                        help="file which includes all verified ticker, relative to basedata folder.")
    parser.add_argument('-symbol', action="store", dest="symbol", required=False,
                        default="symbol",
                        help="symbol field column header")
    parser.add_argument('-market', action="store", dest="market", required=False,
                        default="market",
                        help="market field column header.")
    parser.add_argument('-bypass', action="store_true", dest="bypass", required=False,
                        default=False,
                        help="if desc exists, then bypass it")  # control broken download
    parser.add_argument('-total', action="store", dest="total", required=False, default=5, type=int,
                        help='total pieces to split.')
    parser.add_argument('-piece', action="store", dest="piece", required=False, default=-1, type=int,
                        help='value should be between 1 and total, -1 means all.')
    parser.add_argument('-downloadwait', action="store", dest="downloadwait", required=False, default=15, type=int,
                        help='time to wait for download href link.')
    args = parser.parse_args()
    # Copy the parsed namespace into a plain dict keyed by option name.
    option_names = ('driver', 'source', 'symbol', 'market', 'bypass',
                    'total', 'piece', 'downloadwait')
    return {name: getattr(args, name) for name in option_names}
def cleanjohn_commandline_parser():
    '''
    Parse command line options for the ticker clean tool.

    Validates that the source file and the web driver binary exist
    before returning.

    :return: tuple (driver, sourcefile, recordfile, verifiedfile,
             notfoundfile) of the driver path and the four absolute
             data-file paths under the basedata folder.
    :raises IOError: if the source file or the driver binary is missing.
    '''
    parser = argparse.ArgumentParser(description="John's ticker clean tool command line parser")
    parser.add_argument('-driver', action="store", dest="driver", required=False,
                        default='/Users/desheng/builds/phantomjs/bin/phantomjs',
                        help="path to gecko driver or phantomjs")
    parser.add_argument('-source', action="store", dest="source", required=False,
                        default="ticker_all.csv",
                        help="file which includes all original ticker, relative to basedata folder.")
    parser.add_argument('-record', action="store", dest="record", required=False,
                        default="ticker_all_records.csv",
                        help="file which includes checking record for all original ticker, relative to basedata folder.")
    parser.add_argument('-verified', action="store", dest="verified", required=False,
                        default="ticker_all_verified.csv",
                        help="file which includes verfied record for all original ticker, relative to basedata folder.")
    parser.add_argument('-notfound', action="store", dest="notfound", required=False,
                        default="ticker_all_notfound.csv",
                        help="file which includes not-found record for all original ticker, relative to basedata folder.")
    args = parser.parse_args()
    basedata = get_basedata_path()
    sourcefile = os.path.join(basedata, args.source)
    # os.path.isfile() is False for missing paths, so it subsumes the
    # separate os.path.exists() check the old code performed.
    if not os.path.isfile(sourcefile):
        raise IOError("Source file not found:{}".format(args.source))
    if not os.path.isfile(args.driver):
        # Fixed typo in the message: "geco" -> "gecko".
        raise IOError("Can't find gecko driver:{}".format(args.driver))
    recordfile = os.path.join(basedata, args.record)
    verifiedfile = os.path.join(basedata, args.verified)
    notfoundfile = os.path.join(basedata, args.notfound)
    return args.driver, sourcefile, recordfile, verifiedfile, notfoundfile
def command_line_parser():
    '''
    Parse command line options for the stock data downloader.

    :return: tuple (companylist_file, root, max, market, sectorfilter,
             industryfilter, additionalfilter, startdate, enddate).
    :raises ValueError: if the output folder cannot be created or a
             date option is not in yyyymmdd format.
    '''
    parser = argparse.ArgumentParser(description="Stock data downloader.")
    parser.add_argument('-market', action="store", dest="market", required=True,
                        help="Market to download, it should be one of NASDAQ, NYSE, AMEX")
    parser.add_argument('-root', action="store", dest="root", required=False,
                        default="./exportdata",
                        help="root folder to store downloaded data, for example . or ./exportdata/ etc")
    parser.add_argument('-max', action="store", dest="max", required=False,
                        default="0", type=int,
                        help="max tickers to download, for example: 10 or 100 or 1000.")
    parser.add_argument('-sector', action="store", dest="sector", required=False, default=None,
                        help='Sector to filter, for example: "Finance,Consumer Services"')
    parser.add_argument('-industry', action="store", dest="industry", required=False, default=None,
                        help='industry to filter, for example: "Property-Casualty Insurers,Finance/Investors Services"')
    parser.add_argument('-additional', action="store", dest="additional", required=False, default=None,
                        help='additional col to indicate selection.')
    parser.add_argument('-start', action="store", dest="start", required=False, default=None,
                        help='start date for query, format:yyyymmdd 20100101.')
    parser.add_argument('-end', action="store", dest="end", required=False, default=None,
                        help='end date for query, format:yyyymmdd 20100101.')
    args = parser.parse_args()
    market = args.market.upper()
    companylist_file = get_company_list_file(market)

    # exist_ok avoids the race between a separate exists() check and
    # makedirs(); only OSError is caught (the old bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit).
    try:
        os.makedirs(args.root, exist_ok=True)
    except OSError as e:
        raise ValueError("Unexpected error: {}".format(e)) from e

    def _strip_quotes(value):
        # Filter values may arrive wrapped in literal double quotes.
        return value.replace('"', '') if value else None

    sectorfilter = _strip_quotes(args.sector)
    industryfilter = _strip_quotes(args.industry)
    additionalfilter = _strip_quotes(args.additional)

    def _parse_date(value):
        # Dates arrive as yyyymmdd strings, e.g. 20100101; None/empty
        # means "no bound".  strptime only raises ValueError on bad input.
        if not value:
            return None
        try:
            return datetime.strptime(value, '%Y%m%d')
        except ValueError as e:
            raise ValueError("{} format error.".format(value)) from e

    startdate = _parse_date(args.start)
    enddate = _parse_date(args.end)
    return (companylist_file, args.root, args.max, args.market, sectorfilter,
            industryfilter, additionalfilter, startdate, enddate)
class StockOption:
    """Holds the filtered company list and settings for a stock-export run."""

    def __init__(self):
        self.com_list = None   # pandas DataFrame of companies, set by init_by_sec()
        self.root = None       # output folder for exported data
        self.max = 0           # max tickers to process (0 = no limit)
        self.market = None     # market name, e.g. NASDAQ, AMEX, NYSE
        self.startdate = None  # optional query start date
        self.enddate = None    # optional query end date

    def init_by_sec(self, companylistfile, root, max, market, sectorfilter, industryfilter, add_filter_col, startdate, enddate):
        '''
        Load and filter the company list, and validate/create the output folder.

        :param companylistfile: company list CSV as downloaded from sec
        :param root: root folder to store data. default is ./exportdata
        :param max: max tickers to be processed (parameter name shadows the
                    builtin; kept for backward compatibility with callers)
        :param market: market name, NASDAQ, AMEX, NYSE etc
        :param sectorfilter: comma-separated sector names to keep, or None
        :param industryfilter: comma-separated industry names to keep, or None
        :param add_filter_col: additional column whose "yes"-like values select rows
        :param startdate: optional query start date
        :param enddate: optional query end date
        :raises ValueError: on missing headers or an unusable root folder
        '''
        com_list = pd.read_csv(companylistfile)
        headers = list(com_list)
        required_cols = ['Symbol', 'IPOyear', 'Sector', 'industry']
        if add_filter_col:
            required_cols.append(add_filter_col)
        if not self.check_header(required_cols, headers):
            raise ValueError("{},Some header missed in file:{}".format(required_cols, companylistfile))
        # exist_ok avoids the exists()/makedirs() race; only OSError is
        # caught (the old bare `except:` also swallowed KeyboardInterrupt).
        try:
            os.makedirs(root, exist_ok=True)
        except OSError as e:
            raise ValueError("Unexpected error: {}".format(e)) from e
        if not os.path.isdir(root):
            raise ValueError("{} should be folder.".format(root))
        if sectorfilter:
            sector_filter_list = sectorfilter.split(",")
            com_list = com_list.loc[com_list['Sector'].isin(sector_filter_list)]
        if industryfilter:
            industry_filter_list = industryfilter.split(",")
            com_list = com_list.loc[com_list['industry'].isin(industry_filter_list)]
        if add_filter_col:
            com_list = com_list.loc[com_list[add_filter_col].isin(['Y', 'y', 'T', 'TRUE', 'True', 'true', 'YES', 'Yes', 'yes'])]
        if max > 0:
            com_list = com_list.head(max)
        self.startdate = startdate
        self.enddate = enddate
        # 'n/a' IPO years default to '2000'; Sector/industry values are
        # sanitized so they can be used safely in folder names.
        com_list.loc[com_list['IPOyear'] == 'n/a', 'IPOyear'] = '2000'
        com_list['industry'].replace(['&', '\/', '\\\\', ':', 'n_a', 'n\/a'],
                                     ['_', '_', '_', '_', 'NotAvailable', 'NotAvailable'],
                                     regex=True, inplace=True)
        com_list['Sector'].replace(['&', '\/', '\\\\', ':', 'n_a', 'n\/a'],
                                   ['_', '_', '_', '_', 'NotAvailable', 'NotAvailable'],
                                   regex=True, inplace=True)
        self.com_list = com_list
        self.root = root
        self.max = max
        self.market = market

    def get_total_list(self):
        """Return the number of companies loaded.

        :raises ValueError: if init_by_sec() has not been called yet.
        """
        # Explicit None check: truth-testing a pandas DataFrame raises
        # "truth value of a DataFrame is ambiguous", so the previous
        # `if self.com_list:` broke as soon as data was loaded.
        if self.com_list is not None:
            return len(self.com_list)
        raise ValueError("Data hasn't been initialized yet")

    def get_processed_file(self):
        """Return the processed-tickers CSV path, or None if root is unset."""
        if self.root:
            return os.path.join(self.root, "processed_tickets.csv")

    def check_header(self, required_cols, total_cols):
        '''
        Check that total_cols contains every entry of required_cols.

        :param required_cols: column names that must be present
        :param total_cols: column names actually found in the file
        :return: True when every required column is present
        '''
        # Subset comparison also handles duplicate names in required_cols,
        # which the previous length-based comparison miscounted.
        return set(required_cols).issubset(total_cols)
| {
"content_hash": "fd61505970f8a9353828fc558f899162",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 127,
"avg_line_length": 44.972727272727276,
"alnum_prop": 0.6197695573074591,
"repo_name": "xudesheng/stock36",
"id": "0ccedd90ec3a667f68d98869139822245e2690e3",
"size": "19834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stocks/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "73919"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy as sp
import matplotlib.pylab as plt
'''
Objects representing the sections of a PEST control file.
'''
class controldata(object):
    '''
    Container for the parameters of the "control data" section of a PEST
    control file.

    Attributes (PEST variable names; stored lowercase on the instance):
    RSTFLE : "restart or norestart" Instructs PEST whether to write restart data
    PESTMODE : "estimation","prediction","regularisation","pareto" PEST mode of operation
    NPAR : Number of Parameters
    NOBS : Number of Observations
    NPARGP : Number of Parameter Groups
    NPRIOR : Number of Prior Information equations
    NOBSG : Number of Observation Groups (stored as self.nobsgp)
    MAXCOMPDIM : Number of elements in the compressed Jacobian matrix
    NTPLFLE : Number of Template Files
    NINSFLE : Number of Instruction Files
    PRECIS : "single or double" Precision
    DPOINT : "point or nopoint" Whether to use Decimal Points
    NUMCOM : Number of command lines used to run model
    JACFILE : 0,1,-1 whether the model uses an external derivatives file
    MESSFILE : 0,1 whether PEST writes a PEST-to-Model message file
    OBSREREF : "obsreref or noobsreref" observation re-referencing
    RLAMBDA1 : initial Marquardt lambda
    RLAMFAC : dictates the Marquardt lambda adjustment process
    PHIRATSUF : fractional objective function sufficient for end of current iterations
    PHIREDLAM : termination criterion for Marquardt lambda search
    NUMLAM : maximum number of Marquardt lambda tests
    JACUPDATE : activation of Broyden's Jacobian update procedure
    LAMFORGIVE : treat model run failure during the lambda search as high objective function
    DERFORGIVE : "derforgive or noderforgive" forgive model failures during derivative calculation
    RELPARMAX : parameter relative change limit
    FACPARMAX : parameter factor change limit
    FACORIG : minimum fraction of original parameter value in evaluating relative change
    IBOUNDSTICK : instruct PEST not to compute derivatives for a parameter at its bounds
    UPVECBEND : instructs PEST to bend the parameter upgrade vector if a parameter hits bounds
    ABSPARMAX : parameter absolute change limit
    PHIREDSWH : sets objective function change for introduction of central derivatives
    NOPTMAX : -2,-1,0, or any number greater than zero: number of optimisation iterations
    PHIREDSTP : relative objective function reduction triggering termination
    NPHISTP : number of successive iterations over which PHIREDSTP applies
    NPHINORED : number of iterations since last drop in objective function to trigger termination
    RELPARSTP : maximum relative parameter change triggering termination
    NRELPAR : number of successive iterations over which RELPARSTP applies
    PHISTOPTHRESH : objective function threshold triggering termination
    LASTRUN : 0,1 instructs PEST to undertake a final model run with best parameters
    PHIABANDON : objective function value at which to abandon optimisation
    ICOV : 0,1 record covariance matrix
    ICOR : 0,1 record correlation matrix
    IEIG : 0,1 record eigenvectors in matrix file
    IRES : 0,1 record resolution data
    JCOSAVE : "jcosave or nojcosave" save best Jacobian file as JCO file
    VERBOSEREC : "verboserec or noverboserec" omit observation data from rec file
    JCOSAVEITN : "jcosaveitn or nojcosaveitn" write current Jacobian matrix to iteration-specific JCO file
    REISAVEITN : "reisaveitn or noreisaveitn" store residuals to iteration-specific residuals file
    PARSAVEITN : "parsaveitn or noparsaveitn" store iteration-specific parameter value files
    '''
    def __init__(self):
        # Defaults appear tuned for a specific prior run (npar=17,
        # nobs=1342) -- callers are expected to override them.
        # Single-space strings are "not set" placeholders; they are
        # written verbatim to the control file by the writer.
        self.rstfile = 'norestart'
        self.pestmode = 'regularisation'
        self.npar = 17
        self.nobs = 1342
        self.npargp = 17
        self.nprior = 0
        self.nobsgp = 1
        self.maxcompdim = ' '
        self.ntplfle = 1
        self.ninsfle = 1
        self.precis = 'single'
        self.dpoint = 'point'
        self.numcom = 1
        self.jacfile = 0
        self.messfile = 0
        self.obsreref = 'noobsreref'
        self.rlambda1 = 60.0
        self.rlamfac = -3.0
        self.phiratsuf = 0.3
        self.phiredlam = 0.03
        self.numlam = 10
        self.jacupdate = 999
        self.lamforgive = 'nolamforgive'
        self.derforgive = 'noderforgive'
        self.relparmax = 3.0
        self.facparmax = 3.0
        self.facorig = 0.001
        self.iboundstick = 0
        self.upvecbend = ' '
        self.absparmax = ' '
        self.phiredswh = 0.1
        self.noptmax = 30
        self.phiredstp = 0.01
        self.nphistp = 3
        self.nphinored = 3
        self.relparstp = 0.01
        self.nrelpar = 3
        # NOTE(review): attribute name misspells PHISTOPTHRESH; kept
        # as-is because the file writer references it by this name.
        self.phistropthresh = ' '
        self.lastrun = ' '
        self.phiabandon = ' '
        self.icov = 0
        self.icor = 0
        self.ieig = 0
        self.ires = 0
        self.jcosave = 1
        self.verboserec = ' '
        self.jcosaveitn = ' '
        self.reisaveitn = ' '
        self.parsaveitn = ' '
        self.parsaverun = ' '
        return

    def set_suggested_values(self):
        '''
        Set values to those suggested in the PEST manual.
        '''
        self.rlamfac = -3.0
        self.rlambda1 = 10.0
        return
class svd(object):
    '''
    Singular value decomposition section of the PEST control file.

    Attributes:
    SVDMODE : 0,1 activates truncated singular value decomposition for solution of the inverse problem
    MAXSING : number of singular values at which truncation occurs
    EIGTHRESH : <1 eigenvalue ratio threshold for truncation
    EIGWRITE : 0,1 determines content of the SVD output file
    '''

    def __init__(self):
        self.svdmode = 1
        self.maxsing = 13
        self.eigthresh = 0.0000007
        self.eigwrite = 0

    def set_suggested_values(self):
        '''
        Set values to those suggested in the PEST manual.

        Prompts interactively for the number of adjustable parameters
        (MAXSING).
        '''
        self.svdmode = 1
        # int() so MAXSING is numeric; input() alone returned a string.
        self.maxsing = int(input('How many adjustable paramaters are there? : '))
        self.eigthresh = 0.0000005
        # Fixed: this previously assigned to a misspelled 'eiqwrite'
        # attribute, leaving the real eigwrite value untouched.
        self.eigwrite = 0
class pargroup(object):
    '''
    One parameter-group line of the PEST control file.

    Attributes:
    PARGPNAME : name of the parameter group
    INCTYPE : "relative", "absolute" or "rel_to_max" method by which parameter increments are calculated
    DERINC : absolute or relative parameter increment
    DERINCLB : lower bound on the parameter increment
    FORCEN : "switch,always_2,always_3,always_5,switch_5" whether central derivatives calculation is undertaken
    DERINCMUL : derivative increment multiplier for central derivatives calculation
    DERMTHD : "parabolic,outside_pts,best_fit,minvar,maxprec" method of central derivatives calculation
    '''

    def __init__(self, pargpname, inctyp, derinc, derinclb, forcen, derincmul, dermthd):
        # Plain value container: store each field as given.
        (self.pargpname, self.inctyp, self.derinc, self.derinclb,
         self.forcen, self.derincmul, self.dermthd) = (
            pargpname, inctyp, derinc, derinclb, forcen, derincmul, dermthd)
class paramater(object):
    '''
    One parameter-data line of the PEST control file.

    Attributes:
    PARNME : parameter name
    PARTRANS : "log,none,fixed,tied" parameter transformation
    PARCHGLIM : "relative,factor,absolute(N)" type of parameter change limit
    PARVAL1 : initial parameter value
    PARLBND : parameter lower bound
    PARUBND : parameter upper bound
    PARGP : parameter group name
    SCALE : multiplication factor for parameter
    OFFSET : number to add to parameter
    DERCOM : model command line used in computing parameter increments
    '''

    def __init__(self, parnme, partrans, parchglim, parval1, parlbnd, parupbnd, pargp, scale, offset, dercom):
        # Plain value container: store each field as given.
        (self.parnme, self.partrans, self.parchglim, self.parval1,
         self.parlbnd, self.parupbnd, self.pargp, self.scale,
         self.offset, self.dercom) = (
            parnme, partrans, parchglim, parval1, parlbnd, parupbnd,
            pargp, scale, offset, dercom)
class obsgroup(object):
    '''
    One observation-group line of the PEST control file.

    Attributes:
    OBGNME : Name of observation group
    '''

    def __init__(self, obgnme):
        self.obgnme = obgnme
class observation(object):
    '''
    One observation-data line of the PEST control file.

    Attributes:
    OBSNME : observation name
    OBSVAL : observation value
    WEIGHT : observation weight
    OBGNME : group that the observation belongs to
    '''

    def __init__(self, obsnme, obsval, weight, obgnme):
        # Plain value container: store each field as given.
        (self.obsnme, self.obsval, self.weight, self.obgnme) = (
            obsnme, obsval, weight, obgnme)
class commandline(object):
    '''
    Model command line entry of the PEST control file.

    Attributes:
    COMMAND : Text which specifies how the model is run
    '''

    def __init__(self, command):
        self.command = command
class modelinout(object):
    '''
    Model input/output section of the PEST control file: how PEST writes
    model input and reads model output.

    Attributes:
    TEMPFLE : Template File
    INFLE : Input File
    INSFLE : Instruction file for reading model output
    OUTFLE : Output File
    '''

    def __init__(self, tempfle, infle, insfle, outfle):
        # Plain value container: store each field as given.
        (self.tempfle, self.infle, self.insfle, self.outfle) = (
            tempfle, infle, insfle, outfle)
class prior(object):
    '''
    One prior-information line of the PEST control file.

    Attributes:
    PILBL : Prior information label
    EQ : PFAC*PARNME + PFIC * log(PARNME) = PIVAL -- text of a prior information equation
    WEIGHT : weight of the prior information
    OBGPNME : Observation group that the prior information belongs to
    '''

    def __init__(self, pilbl, eq, weight, obgpnme):
        # Plain value container: store each field as given.
        (self.pilbl, self.eq, self.weight, self.obgpnme) = (
            pilbl, eq, weight, obgpnme)
class regularisation(object):
    '''
    Regularisation section of the PEST control file.

    Attributes:
    PHIMLIM : target measurement objective function
    PHIMACCEPT : acceptable measurement objective function
    WFINIT : initial regularisation weight factor
    WFMIN : minimum regularisation weight factor
    WFMAX : maximum regularisation weight factor
    WFFAC : regularisation weight factor adjustment factor
    WFTOL : convergence criterion for regularisation weight factor
    IREGADJ : 0,1,2,3,4,5 instructs PEST to perform inter-regularisation group weight factor adjustments
    '''

    def __init__(self, phimlim, phimaccept, wfinit, wfmin, wfmax, wffac, wftol, iregadj):
        # Plain value container: store each field as given.
        (self.phimlim, self.phimaccept, self.wfinit, self.wfmin,
         self.wfmax, self.wffac, self.wftol, self.iregadj) = (
            phimlim, phimaccept, wfinit, wfmin, wfmax, wffac, wftol, iregadj)
def createPCF(fileName, controlObj, svdObj, pargpObjList, parDataObjList, obsgpObjList, obsObjList, commandObjList, modinoutObj, piObjList, regularObj):
    '''
    Create a PEST control file (PCF) by writing the supplied section
    objects to fileName in the order PEST expects.

    :param fileName: path of the control file to write
    :param controlObj: controldata instance ("* control data" section)
    :param svdObj: svd instance ("* singular value decomposition" section)
    :param pargpObjList: list of pargroup instances
    :param parDataObjList: list of paramater instances
    :param obsgpObjList: list of obsgroup instances
    :param obsObjList: list of observation instances
    :param commandObjList: list of commandline instances
    :param modinoutObj: modelinout instance
    :param piObjList: list of prior instances
    :param regularObj: regularisation instance
    '''
    # with-statement guarantees the handle is closed even if a missing
    # attribute raises mid-write (the old open()/close() pair leaked the
    # handle on error).
    with open(fileName, 'w') as f:
        f.write('pcf\n')
        f.write('* control data\n')
        f.write('%s %s\n' % (controlObj.rstfile, controlObj.pestmode))
        f.write('%s %s %s %s %s %s\n' % (controlObj.npar, controlObj.nobs, controlObj.npargp, controlObj.nprior, controlObj.nobsgp, controlObj.maxcompdim))
        f.write('%s %s %s %s %s %s %s %s\n' % (controlObj.ntplfle, controlObj.ninsfle, controlObj.precis, controlObj.dpoint, controlObj.numcom, controlObj.jacfile,
                                               controlObj.messfile, controlObj.obsreref))
        f.write('%s %s %s %s %s %s %s %s\n' % (controlObj.rlambda1, controlObj.rlamfac, controlObj.phiratsuf, controlObj.phiredlam, controlObj.numlam, controlObj.jacupdate,
                                               controlObj.lamforgive, controlObj.derforgive))
        f.write('%s %s %s %s %s %s\n' % (controlObj.relparmax, controlObj.facparmax, controlObj.facorig, controlObj.iboundstick, controlObj.upvecbend, controlObj.absparmax))
        f.write('%s\n' % (controlObj.phiredswh))
        f.write('%s %s %s %s %s %s %s %s %s\n' % (controlObj.noptmax, controlObj.phiredstp, controlObj.nphistp, controlObj.nphinored, controlObj.relparstp, controlObj.nrelpar,
                                                  controlObj.phistropthresh, controlObj.lastrun, controlObj.phiabandon))
        f.write('%s %s %s %s %s %s %s %s %s %s\n' % (controlObj.icov, controlObj.icor, controlObj.ieig, controlObj.ires, controlObj.jcosave, controlObj.verboserec, controlObj.jcosaveitn,
                                                     controlObj.reisaveitn, controlObj.parsaveitn, controlObj.parsaverun))
        f.write('* singular value decomposition\n')
        f.write('%s\n' % (svdObj.svdmode))
        f.write('%s %s\n' % (svdObj.maxsing, svdObj.eigthresh))
        f.write('%s\n' % (svdObj.eigwrite))
        # Section names fixed to the spelling PEST requires
        # ("parameter", not "paramater") so the file parses.
        f.write('* parameter groups\n')
        for group in pargpObjList:
            f.write('%s %s %s %s %s %s %s\n' % (group.pargpname, group.inctyp, group.derinc, group.derinclb, group.forcen, group.derincmul, group.dermthd))
        f.write('* parameter data\n')
        for data in parDataObjList:
            f.write('%s %s %s %s %s %s %s %s %s %s\n' % (data.parnme, data.partrans, data.parchglim, data.parval1, data.parlbnd, data.parupbnd, data.pargp, data.scale, data.offset, data.dercom))
        f.write('* observation groups\n')
        for group in obsgpObjList:
            f.write('%s \n' % (group.obgnme))
        f.write('* observation data \n')
        for obs in obsObjList:
            f.write('%s %s %s %s \n' % (obs.obsnme, obs.obsval, obs.weight, obs.obgnme))
        f.write('* model command line \n')
        for commandLine in commandObjList:
            # NOTE(review): no newline is appended here -- assumes each
            # command string already ends with '\n'; confirm with callers.
            f.write('%s' % (commandLine.command))
        f.write('* model input/output\n')
        f.write('%s %s\n' % (modinoutObj.tempfle, modinoutObj.infle))
        f.write('%s %s\n' % (modinoutObj.insfle, modinoutObj.outfle))
        f.write('* prior information\n')
        for pi in piObjList:
            f.write('%s %s %s %s\n' % (pi.pilbl, pi.eq, pi.weight, pi.obgpnme))
        f.write('* regularisation\n')
        f.write('%s %s\n' % (regularObj.phimlim, regularObj.phimaccept))
        f.write('%s %s %s\n' % (regularObj.wfinit, regularObj.wfmin, regularObj.wfmax))
        f.write('%s %s %s\n' % (regularObj.wffac, regularObj.wftol, regularObj.iregadj))
| {
"content_hash": "72c0eb1c68eee7a6687726b3de5727dd",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 179,
"avg_line_length": 37.794871794871796,
"alnum_prop": 0.7057892356399819,
"repo_name": "nvoss12838/APE",
"id": "e7e7061e259e3ef6aeb65457d2e98e66c016b98c",
"size": "13266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pcf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35959"
}
],
"symlink_target": ""
} |
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXLPT(gxapi_cy.WrapLPT):
    """
    GXLPT class.

    This class allows access to the current default line patterns.
    It does not allow the definition of individual patterns. It
    is used primarily with `GXMAP <geosoft.gxapi.GXMAP>` class functions.
    """

    def __init__(self, handle=0):
        # A handle of 0 denotes the null/undefined instance
        # (see null() and is_null()).
        super(GXLPT, self).__init__(GXContext._get_tls_geo(), handle)

    @classmethod
    def null(cls):
        """
        A null (undefined) instance of `GXLPT <geosoft.gxapi.GXLPT>`

        :returns: A null `GXLPT <geosoft.gxapi.GXLPT>`
        :rtype:   GXLPT
        """
        return GXLPT()

    def is_null(self):
        """
        Check if this is a null (undefined) instance

        :returns: True if this is a null (undefined) instance, False otherwise.
        :rtype:   bool
        """
        return self._internal_handle() == 0

    # Miscellaneous

    @classmethod
    def create(cls):
        """
        Creates a line pattern object with current default patterns.

        :returns: `GXLPT <geosoft.gxapi.GXLPT>` Object
        :rtype:   GXLPT

        .. versionadded:: 5.0

        **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
        """
        ret_val = gxapi_cy.WrapLPT._create(GXContext._get_tls_geo())
        return GXLPT(ret_val)

    def get_lst(self, lst):
        """
        Copies all pattern names into a `GXLST <geosoft.gxapi.GXLST>` object.

        :param lst: `GXLST <geosoft.gxapi.GXLST>` Handle
        :type  lst: GXLST

        .. versionadded:: 5.0

        **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
        """
        self._get_lst(lst)

    def get_standard_lst(self, lst):
        """
        Copies the six standard line types into a `GXLST <geosoft.gxapi.GXLST>` object.

        :param lst: `GXLST <geosoft.gxapi.GXLST>` Handle
        :type  lst: GXLST

        .. versionadded:: 9.0

        **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

        **Note:** The six standard line types are "solid", "long dash", "dotted", "short dash", "long, short dash" and "dash dot".
        """
        self._get_standard_lst(lst)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer | {
"content_hash": "a496a7969d8a89bae5492e380f105466",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 135,
"avg_line_length": 25.991304347826087,
"alnum_prop": 0.6155904984944798,
"repo_name": "GeosoftInc/gxpy",
"id": "e5621f7dc4c90e572c9eaae77217a899c3f00928",
"size": "3099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geosoft/gxapi/GXLPT.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4799134"
}
],
"symlink_target": ""
} |
from typing import Optional
from itertools import product
import numpy as np
from openfermion.ops import InteractionOperator, InteractionRDM
from openfermion.linalg import wedge
from openfermion.transforms import get_fermion_operator
from recirq.hfvqe.circuits import rhf_params_to_matrix
def get_matrix_of_eigs(w: np.ndarray) -> np.ndarray:
    r"""Transform the eigenvalues for getting the gradient.

    .. math:
        f(w) \rightarrow
        \frac{e^{i (\lambda_{i} - \lambda_{j})}}{i (\lambda_{i} - \lambda_{j})}

    Args:
        w: eigenvalues of C-matrix

    Returns:
        New array of transformed eigenvalues
    """
    dim = w.shape[0]
    result = np.zeros((dim, dim), dtype=np.complex128)
    for row in range(dim):
        for col in range(dim):
            delta = w[row] - w[col]
            if np.isclose(abs(delta), 0):
                # Degenerate pair: the limit of the expression is 1.
                result[row, col] = 1
            else:
                result[row, col] = (np.exp(1j * delta) - 1) / (1j * delta)
    return result
class RestrictedHartreeFockObjective():
    """Objective function for Restricted Hartree-Fock.

    Wraps an `InteractionOperator` Hamiltonian for a system with a fixed
    number of electrons and exposes the energy and the parameter
    gradient as functions of the alpha-spin block of the one-particle
    density matrix (opdm).
    """

    def __init__(self, hamiltonian: InteractionOperator, num_electrons: int):
        self.hamiltonian = hamiltonian
        self.fermion_hamiltonian = get_fermion_operator(self.hamiltonian)
        # one_body_tensor is indexed by spin-orbitals, so its leading
        # dimension is the qubit count; spatial orbitals are half that.
        self.num_qubits = hamiltonian.one_body_tensor.shape[0]
        self.num_orbitals = self.num_qubits // 2
        self.num_electrons = num_electrons
        # Restricted HF: each occupied spatial orbital holds two electrons.
        self.nocc = self.num_electrons // 2
        self.nvirt = self.num_orbitals - self.nocc
        self.occ = list(range(self.nocc))
        self.virt = list(range(self.nocc, self.nocc + self.nvirt))

    def rdms_from_opdm_aa(self, opdm_aa) -> InteractionRDM:
        """Generate the RDM from just the alpha-alpha block.

        Due to symmetry, the beta-beta block is the same, and the other
        blocks are zero.

        Args:
            opdm_aa: The alpha-alpha block of the RDM
        """
        # Even spin-orbital indices hold the alpha block, odd the beta
        # block; mixed-spin blocks remain zero.
        opdm = np.zeros((self.num_qubits, self.num_qubits), dtype=complex)
        opdm[::2, ::2] = opdm_aa
        opdm[1::2, 1::2] = opdm_aa
        tpdm = wedge(opdm, opdm, (1, 1), (1, 1))
        rdms = InteractionRDM(opdm, 2 * tpdm)
        return rdms

    def energy_from_opdm(self, opdm_aa: np.ndarray) -> float:
        """Return the energy.

        Args:
            opdm_aa: The alpha-alpha block of the RDM
        """
        rdms = self.rdms_from_opdm_aa(opdm_aa)
        return rdms.expectation(self.hamiltonian).real

    def global_gradient_opdm(self, params: np.ndarray, alpha_opdm: np.ndarray):
        """Gradient of the energy with respect to the kappa parameters.

        Args:
            params: flat array of the nocc*nvirt rotation parameters
                (same layout as consumed by rhf_params_to_matrix).
            alpha_opdm: alpha-alpha block of the one-particle density
                matrix.

        Returns:
            Complex array of length nocc*nvirt with one gradient entry
            per parameter.
        """
        opdm = np.zeros((self.num_qubits, self.num_qubits), dtype=np.complex128)
        opdm[::2, ::2] = alpha_opdm
        opdm[1::2, 1::2] = alpha_opdm
        tpdm = 2 * wedge(opdm, opdm, (1, 1), (1, 1))

        # now go through and generate all the necessary Z, Y, Y_kl matrices
        kappa_matrix = rhf_params_to_matrix(params,
                                            len(self.occ) + len(self.virt),
                                            self.occ, self.virt)
        # kron with identity duplicates the spatial rotation over spin.
        kappa_matrix_full = np.kron(kappa_matrix, np.eye(2))
        w_full, v_full = np.linalg.eigh(
            -1j * kappa_matrix_full)  # so that kappa = i U lambda U^
        eigs_scaled_full = get_matrix_of_eigs(w_full)

        grad = np.zeros(self.nocc * self.nvirt, dtype=np.complex128)
        kdelta = np.eye(self.num_qubits)

        # Accumulate the gradient one parameter direction at a time.
        for p in range(self.nocc * self.nvirt):
            # Unit vector in parameter space selects direction p.
            grad_params = np.zeros_like(params)
            grad_params[p] = 1
            Y = rhf_params_to_matrix(grad_params,
                                     len(self.occ) + len(self.virt), self.occ,
                                     self.virt)
            Y_full = np.kron(Y, np.eye(2))

            # Now rotate Y into the basis that diagonalizes Z
            Y_kl_full = v_full.conj().T @ Y_full @ v_full
            # now rotate
            # Y_{kl} * (exp(i(l_{k} - l_{l})) - 1) / (i(l_{k} - l_{l}))
            # into the original basis
            pre_matrix_full = v_full @ (eigs_scaled_full *
                                        Y_kl_full) @ v_full.conj().T

            # Contract the pre-matrix against the one- and two-body
            # tensors and the density matrices; the six terms below are
            # the one-body (2) and two-body (4) contributions.
            grad_expectation = -1.0 * np.einsum(
                'ab,pq,aq,pb',
                self.hamiltonian.one_body_tensor,
                pre_matrix_full,
                kdelta,
                opdm,
                optimize='optimal').real
            grad_expectation += 1.0 * np.einsum(
                'ab,pq,bp,aq',
                self.hamiltonian.one_body_tensor,
                pre_matrix_full,
                kdelta,
                opdm,
                optimize='optimal').real
            grad_expectation += 1.0 * np.einsum(
                'ijkl,pq,iq,jpkl',
                self.hamiltonian.two_body_tensor,
                pre_matrix_full,
                kdelta,
                tpdm,
                optimize='optimal').real
            grad_expectation += -1.0 * np.einsum(
                'ijkl,pq,jq,ipkl',
                self.hamiltonian.two_body_tensor,
                pre_matrix_full,
                kdelta,
                tpdm,
                optimize='optimal').real
            grad_expectation += -1.0 * np.einsum(
                'ijkl,pq,kp,ijlq',
                self.hamiltonian.two_body_tensor,
                pre_matrix_full,
                kdelta,
                tpdm,
                optimize='optimal').real
            grad_expectation += 1.0 * np.einsum(
                'ijkl,pq,lp,ijkq',
                self.hamiltonian.two_body_tensor,
                pre_matrix_full,
                kdelta,
                tpdm,
                optimize='optimal').real
            grad[p] = grad_expectation
        return grad
def generate_hamiltonian(one_body_integrals: np.ndarray,
                         two_body_integrals: np.ndarray,
                         constant: float,
                         EQ_TOLERANCE: float = 1.0E-12):
    """Build a spin-orbital InteractionOperator from spatial integrals.

    Spatial orbital p maps to spin-orbitals 2p and 2p+1; one-body terms
    couple equal spins only, while the two-body terms are populated for
    both same-spin and mixed-spin index patterns with half the spatial
    integral value.

    Args:
        one_body_integrals: (n, n) spatial one-body integral matrix.
        two_body_integrals: (n, n, n, n) spatial two-body integrals.
        constant: constant (nuclear-repulsion-like) energy offset.
        EQ_TOLERANCE: coefficients with absolute value below this are
            zeroed out. Fixed annotation: previously typed Optional but
            passing None would crash the comparison, so it must be a float.

    Returns:
        InteractionOperator over 2n spin-orbitals.
    """
    n_qubits = 2 * one_body_integrals.shape[0]
    # Initialize Hamiltonian coefficients.
    one_body_coefficients = np.zeros((n_qubits, n_qubits))
    two_body_coefficients = np.zeros((n_qubits, n_qubits, n_qubits, n_qubits))
    # Loop through integrals.
    for p in range(n_qubits // 2):
        for q in range(n_qubits // 2):
            # Populate 1-body coefficients. Require p and q have same spin.
            one_body_coefficients[2 * p, 2 * q] = one_body_integrals[p, q]
            one_body_coefficients[2 * p + 1, 2 * q +
                                  1] = one_body_integrals[p, q]
            # Continue looping to prepare 2-body coefficients.
            for r in range(n_qubits // 2):
                for s in range(n_qubits // 2):
                    # Mixed spin
                    two_body_coefficients[2 * p, 2 * q + 1, 2 * r + 1, 2 *
                                          s] = (two_body_integrals[p, q, r, s] /
                                                2.)
                    two_body_coefficients[2 * p + 1, 2 * q, 2 * r, 2 * s +
                                          1] = (two_body_integrals[p, q, r, s] /
                                                2.)
                    # Same spin
                    two_body_coefficients[2 * p, 2 * q, 2 * r, 2 *
                                          s] = (two_body_integrals[p, q, r, s] /
                                                2.)
                    two_body_coefficients[2 * p + 1, 2 * q + 1, 2 * r +
                                          1, 2 * s +
                                          1] = (two_body_integrals[p, q, r, s] /
                                                2.)
    # Truncate tiny coefficients to exact zero.
    one_body_coefficients[
        np.absolute(one_body_coefficients) < EQ_TOLERANCE] = 0.
    two_body_coefficients[
        np.absolute(two_body_coefficients) < EQ_TOLERANCE] = 0.

    # Cast to InteractionOperator class and return.
    molecular_hamiltonian = InteractionOperator(constant, one_body_coefficients,
                                                two_body_coefficients)
    return molecular_hamiltonian
| {
"content_hash": "e293eb9c2763d1819ff2917eef358136",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 80,
"avg_line_length": 40.926829268292686,
"alnum_prop": 0.50119189511323,
"repo_name": "quantumlib/ReCirq",
"id": "39785d369a3fd03f5e89a4c2804f5fb129999f1a",
"size": "8962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recirq/hfvqe/objective.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "365"
},
{
"name": "Dockerfile",
"bytes": "300"
},
{
"name": "Jupyter Notebook",
"bytes": "22201"
},
{
"name": "Makefile",
"bytes": "670"
},
{
"name": "Python",
"bytes": "989707"
},
{
"name": "Shell",
"bytes": "2189"
}
],
"symlink_target": ""
} |
import logging
from django.conf import settings
from django.core.cache import cache
from twython import Twython, TwythonAuthError, TwythonError
logger = logging.getLogger(__name__)
def get_user_timeline(screen_name):
    """Return the cached Twitter timeline for *screen_name*.

    On a cache miss the timeline is fetched via the Twitter API and
    cached for settings.TWITTER_CACHE_TIMEOUT seconds.

    Raises:
        TwythonError: if the API request fails.
    """
    # NOTE(review): the cache key is not scoped by screen_name, so all
    # callers share a single cached timeline -- confirm this is intended.
    timeline = cache.get('twitter_timeline')
    if not timeline:
        try:
            twitter = authenticate()
            timeline = twitter.get_user_timeline(
                screen_name=screen_name,
                count=settings.TWITTER_USER_TIMELINE_ITEMS)
            cache.set('twitter_timeline', timeline,
                      settings.TWITTER_CACHE_TIMEOUT)
        except TwythonError:
            # logger.exception records the stack trace; bare `raise`
            # re-raises the original exception with its traceback intact
            # (the old `logger.error(e); raise e` lost the trace in logs).
            logger.exception("Failed to fetch timeline for %s", screen_name)
            raise
    return timeline
def authenticate():
    """Return a Twython client, caching it to avoid rebuilding per call.

    Authentication errors are logged and re-raised.
    """
    client = cache.get('twitter')
    if client:
        return client
    try:
        client = Twython(settings.TWITTER_API_KEY,
                         settings.TWITTER_API_SECRET,
                         settings.TWITTER_ACCESS_TOKEN,
                         settings.TWITTER_ACCESS_TOKEN_SECRET)
        cache.set('twitter', client, settings.TWITTER_CACHE_TIMEOUT)
    except TwythonAuthError as e:
        logger.error(e)
        raise e
    return client
| {
"content_hash": "b3d2ecf2d93ecb06a60e7097e050ab82",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 73,
"avg_line_length": 27.022222222222222,
"alnum_prop": 0.5879934210526315,
"repo_name": "kingsdigitallab/kdl-django",
"id": "d8502e086acd3fee8c19d7dfb1536791ad790d34",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitterhut/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "69770"
},
{
"name": "HTML",
"bytes": "38338"
},
{
"name": "JavaScript",
"bytes": "15238"
},
{
"name": "Python",
"bytes": "1140999"
},
{
"name": "Shell",
"bytes": "2704"
}
],
"symlink_target": ""
} |
import h5py
from pyspark import SparkConf, SparkContext
from pyspark.sql.types import Row, StructField, StructType, StringType, IntegerType, LongType
from datetime import datetime, date, timedelta
import sys
from operator import add
from pyspark.sql import SQLContext
import os
import json
from sparkles.modules.utils.helper import saveFeatures
from os.path import dirname
import argparse
import time
import calendar
# Hash the keys into different interval periods
def keymod(x, start_time, interval):
    """Map a record to ``(bucket_index, 1)`` for interval counting.

    The bucket index is the number of whole ``interval``-sized periods
    between ``start_time`` and the record's ``created`` timestamp.
    """
    elapsed = x.created - start_time
    bucket = int(elapsed / interval)
    return (bucket, 1)
# Transform the final time
def timetr(x, start_time, interval):
    """Convert ``(bucket_index, count)`` back to ``(epoch_time, count)``.

    The bucket index (``x[0]``) is mapped to the epoch timestamp of the
    start of that interval; the aggregated count (``x[1]``) is kept.
    """
    bucket_index, total = x
    epoch = int(start_time + bucket_index * interval)
    return (epoch, total)
def main():
    """Spark job: count ORDERS rows per fixed-width time interval.

    Positional CLI arguments supply the storage backend, the helper
    module path, the SQL shuffle-partition count, a JSON params blob
    (``start_time``, ``end_time``, ``interval``), JSON inputs (parquet
    path) and an optional JSON features description used when saving
    the result.
    """
    conf = SparkConf()
    conf.setAppName("Parquet Count 60")
    conf.set("spark.jars", "file:/shared_data/spark_jars/hadoop-openstack-3.0.0-SNAPSHOT.jar")
    sc = SparkContext(conf=conf)

    parser = argparse.ArgumentParser()
    parser.add_argument("backend", type=str)
    parser.add_argument("helperpath", type=str)
    parser.add_argument("shuffle_partitions", type=str)
    parser.add_argument("params", type=str)
    parser.add_argument("inputs", type=str)
    parser.add_argument("features", type=str, nargs='?')
    args = parser.parse_args()

    # Swift connection: configure Hadoop's Swift filesystem from the
    # OpenStack environment variables when the swift backend is selected.
    if(args.backend == 'swift'):
        hadoopConf = sc._jsc.hadoopConfiguration()
        hadoopConf.set("fs.swift.impl", "org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem")
        hadoopConf.set("fs.swift.service.SparkTest.auth.url", os.environ['OS_AUTH_URL'] + "/tokens")
        hadoopConf.set("fs.swift.service.SparkTest.http.port", "8443")
        hadoopConf.set("fs.swift.service.SparkTest.auth.endpoint.prefix", "/")
        hadoopConf.set("fs.swift.service.SparkTest.region", os.environ['OS_REGION_NAME'])
        hadoopConf.set("fs.swift.service.SparkTest.public", "false")
        hadoopConf.set("fs.swift.service.SparkTest.tenant", os.environ['OS_TENANT_ID'])
        hadoopConf.set("fs.swift.service.SparkTest.username", os.environ['OS_USERNAME'])
        hadoopConf.set("fs.swift.service.SparkTest.password", os.environ['OS_PASSWORD'])

    helperpath = args.helperpath
    sc.addFile(helperpath + "/utils/helper.py")  # To import custom modules

    shuffle_partitions = args.shuffle_partitions
    params = json.loads(args.params)
    inputs = json.loads(args.inputs)
    # "features" is an optional positional (nargs='?'); json.loads(None)
    # raises TypeError, so only parse it when it was actually supplied.
    features = json.loads(args.features) if args.features else None

    # Timestamps arrive as '%Y-%m-%d_%H:%M:%S.mmm'; parse the seconds part
    # and re-append the millisecond digits to get epoch milliseconds.
    start_time_str = str(params['start_time'])
    start_time = int(str(calendar.timegm(time.strptime(start_time_str[:-4], '%Y-%m-%d_%H:%M:%S'))) + start_time_str[-3:])  # convert to epoch
    end_time_str = str(params['end_time'])
    end_time = int(str(calendar.timegm(time.strptime(end_time_str[:-4], '%Y-%m-%d_%H:%M:%S'))) + end_time_str[-3:])  # convert to epoch
    interval = float(params['interval'])

    filepath = str(inputs[0])  # Provide the complete path

    sqlContext = SQLContext(sc)
    sqlContext.setConf("spark.sql.shuffle.partitions", shuffle_partitions)

    df = sqlContext.read.parquet(filepath)
    df.registerTempTable('ORDERS')
    df = sqlContext.sql("SELECT created FROM ORDERS WHERE created <" + str(end_time) + " AND created >=" + str(start_time))

    # Bucket each row into an interval index, count rows per bucket, then
    # map bucket indices back to epoch timestamps.
    rdd = df.map(lambda x: keymod(x, start_time, interval)).reduceByKey(add)
    rdd = rdd.sortByKey()
    rdd = rdd.map(lambda x: timetr(x, start_time, interval))  # Human readable time

    # Generate the Schema for the feature dataframe
    schemaString = "timestamp count"
    fields_df = []
    for field_name in schemaString.split():
        if(field_name == 'count'):
            fields_df.append(StructField(field_name, IntegerType(), True))
        else:
            fields_df.append(StructField(field_name, LongType(), True))
    schema_rdd = StructType(fields_df)
    dfRdd = sqlContext.createDataFrame(rdd, schema_rdd)

    saveFeatures(dfRdd, features, params, inputs)  # Save as a parquet file and create metadata entry

    print(rdd.collect())
    sc.stop()
# Standard CLI entry-point guard: run the Spark job when executed directly.
if __name__ == "__main__":
    main()
| {
"content_hash": "ab4017ceb1e1f59ed1580c192e356d7a",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 141,
"avg_line_length": 37.41228070175438,
"alnum_prop": 0.6794841735052755,
"repo_name": "CSC-IT-Center-for-Science/spark-analysis",
"id": "3675a8ab6460506a43ff72d75d8b4075b947f360",
"size": "4354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sparkles/modules/event_count_parquet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81175"
}
],
"symlink_target": ""
} |
from future import standard_library
standard_library.install_aliases()
from builtins import object
from configparser import ConfigParser
import os
import io
class MonolitheConfig(object):
    """Monolithe configuration loaded from an INI file.

    Wraps the main ``ConfigParser`` plus an optional attribute-name
    mapping read from a ``mapping.ini`` file that lives next to the
    main configuration file.

    Args:
        path (str): optional path to the configuration file. When given,
            the file must exist and is parsed immediately.
    """

    def __init__(self, path=None):
        self.path = path
        self.config = None
        self.mapping = None
        self.language = 'python'

        if self.path:
            if not os.path.exists(path):
                raise Exception("Could not find path %s" % path)
            config = ConfigParser()
            config.read(path)
            self.set_config(config)

    def copy(self):
        """Return an independent copy of this configuration.

        ``ConfigParser`` objects cannot be copied directly, so the parser
        is serialized to an in-memory buffer and re-read into a fresh one.
        """
        conf_data = io.StringIO()
        self.config.write(conf_data)
        conf_data.seek(0)

        new_config_parser = ConfigParser()
        # read_file() is the modern name of the deprecated readfp().
        new_config_parser.read_file(conf_data)

        # Create a new MonolitheConfig and give it the duplicated parser.
        monolithe_config_copy = MonolitheConfig(path=self.path)
        monolithe_config_copy.set_config(new_config_parser)
        return monolithe_config_copy

    def set_config(self, config):
        """Install *config* as the active parser and load side data.

        Resolves the "user_vanilla" option and, when a path is known,
        reads the optional attribute mapping file next to it.
        """
        self.config = config

        # vanilla
        self.user_vanilla = self.get_option("user_vanilla", "transformer")

        # mapping
        if not self.path:
            return
        mapping_path = "%s/mapping.ini" % os.path.dirname(self.path)
        # Bug fix: the existence check must be on mapping_path; the
        # original tested self.path (already validated in __init__),
        # so a missing mapping.ini was silently read into an empty parser.
        if not os.path.exists(mapping_path):
            return
        self.mapping = ConfigParser()
        self.mapping.read(mapping_path)

    def get_option(self, option, section="monolithe", **kwargs):
        """Return *option* from *section* of the main configuration."""
        return self.config.get(section, option, **kwargs)

    def set_option(self, option, value, section="monolithe"):
        """Set *option* to *value* in *section* of the main configuration."""
        return self.config.set(section, option, value)

    def map_attribute(self, rest_name, attribute_name):
        """Translate *attribute_name* through the optional mapping file.

        Returns the name unchanged when no mapping is loaded or no entry
        exists for (*rest_name*, *attribute_name*).
        """
        if self.mapping is None \
                or not self.mapping.has_section(rest_name) \
                or not self.mapping.has_option(rest_name, attribute_name):
            return attribute_name
        return self.mapping.get(rest_name, attribute_name)
| {
"content_hash": "fe7c3b332dddc490d5a0315e7413037e",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 29.73913043478261,
"alnum_prop": 0.6223196881091618,
"repo_name": "nuagenetworks/monolithe",
"id": "e00823061bc3245747c0346fc1c6ef78581d3066",
"size": "3650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monolithe/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16165"
},
{
"name": "HTML",
"bytes": "983593"
},
{
"name": "JavaScript",
"bytes": "93413"
},
{
"name": "Python",
"bytes": "415189"
},
{
"name": "Smarty",
"bytes": "184108"
}
],
"symlink_target": ""
} |
"""Module level configuration.
Blocks allows module-wide configuration values to be set using a YAML_
configuration file and `environment variables`_. Environment variables
override the configuration file which in its turn overrides the defaults.
The configuration is read from ``~/.blocksrc`` if it exists. A custom
configuration file can be used by setting the ``BLOCKS_CONFIG`` environment
variable. A configuration file is of the form:
.. code-block:: yaml
data_path: /home/user/datasets
If a setting is not configured and does not provide a default, a
:class:`~.ConfigurationError` is raised when it is
accessed.
Configuration values can be accessed as attributes of
:const:`blocks.config.config`.
>>> from blocks.config import config
>>> print(config.default_seed) # doctest: +SKIP
1
The following configurations are supported:
.. option:: default_seed
The seed used when initializing random number generators (RNGs) such as
NumPy :class:`~numpy.random.RandomState` objects as well as Theano's
:class:`~theano.sandbox.rng_mrg.MRG_RandomStreams` objects. Must be an
integer. By default this is set to 1.
.. option:: recursion_limit
The recursion max depth limit used in
:class:`~blocks.main_loop.MainLoop` as well as in other situations when
deep recursion is required. The most notable example of such a situation
is pickling or unpickling a complex structure with lots of objects, such
as a big Theano computation graph.
.. option:: profile, BLOCKS_PROFILE
A boolean value which determines whether to print profiling information
at the end of a call to :meth:`.MainLoop.run`.
.. option:: log_backend
The backend to use for logging experiments. Defaults to `python`, which
stores the log as a Python object in memory. The other option is
`sqlite`.
.. option:: sqlite_database, BLOCKS_SQLITEDB
The SQLite database file to use.
.. option:: max_blob_size
The maximum size of an object to store in an SQLite database in bytes.
Objects beyond this size will trigger a warning. Defaults to 4 kilobytes.
.. _YAML: http://yaml.org/
.. _environment variables:
https://en.wikipedia.org/wiki/Environment_variable
"""
import logging
import os
import six
import yaml
logger = logging.getLogger(__name__)

# Sentinel meaning "no default was supplied" (None may be a valid default).
NOT_SET = object()
class ConfigurationError(Exception):
    """Error raised when a configuration value is requested but not set."""
class Configuration(object):
    """Registry of configuration settings with layered value resolution.

    Each setting resolves, in decreasing precedence: an explicitly
    assigned value, the setting's environment variable, the YAML
    configuration file, and finally the registered default.
    """

    def __init__(self):
        self.config = {}

    def load_yaml(self):
        """Merge settings from the YAML configuration file, if present.

        The file is ``$BLOCKS_CONFIG`` when set, else ``~/.blocksrc``.
        Keys not previously registered via add_config raise ValueError.
        """
        if 'BLOCKS_CONFIG' in os.environ:
            yaml_file = os.environ['BLOCKS_CONFIG']
        else:
            yaml_file = os.path.expanduser('~/.blocksrc')
        if os.path.isfile(yaml_file) and os.path.getsize(yaml_file):
            with open(yaml_file) as f:
                for key, value in yaml.safe_load(f).items():
                    if key not in self.config:
                        raise ValueError("Unrecognized config in YAML: {}"
                                         .format(key))
                    self.config[key]['yaml'] = value

    def __getattr__(self, key):
        # Guard against infinite recursion: looking up 'config' here
        # before it is set would call __getattr__ again.
        if key == 'config' or key not in self.config:
            raise AttributeError
        config_setting = self.config[key]
        # Precedence: explicit value > environment variable > YAML > default.
        if 'value' in config_setting:
            value = config_setting['value']
        elif ('env_var' in config_setting and
              config_setting['env_var'] in os.environ):
            value = os.environ[config_setting['env_var']]
        elif 'yaml' in config_setting:
            value = config_setting['yaml']
        elif 'default' in config_setting:
            value = config_setting['default']
        else:
            raise ConfigurationError("Configuration not set and no default "
                                     "provided: {}.".format(key))
        # Normalize through the registered type function (env-var values
        # are always strings; YAML values may already be parsed).
        return config_setting['type'](value)

    def __setattr__(self, key, value):
        # Assignments to registered settings are stored as the
        # highest-precedence 'value'; everything else (including 'config'
        # itself) is a normal instance attribute.
        if key != 'config' and key in self.config:
            self.config[key]['value'] = value
        else:
            super(Configuration, self).__setattr__(key, value)

    def add_config(self, key, type_, default=NOT_SET, env_var=None):
        """Add a configuration setting.

        Parameters
        ----------
        key : str
            The name of the configuration setting. This must be a valid
            Python attribute name i.e. alphanumeric with underscores.
        type : function
            A function such as ``float``, ``int`` or ``str`` which takes
            the configuration value and returns an object of the correct
            type. Note that the values retrieved from environment
            variables are always strings, while those retrieved from the
            YAML file might already be parsed. Hence, the function provided
            here must accept both types of input.
        default : object, optional
            The default configuration to return if not set. By default none
            is set and an error is raised instead.
        env_var : str, optional
            The environment variable name that holds this configuration
            value. If not given, this configuration can only be set in the
            YAML configuration file.
        """
        self.config[key] = {'type': type_}
        if env_var is not None:
            self.config[key]['env_var'] = env_var
        if default is not NOT_SET:
            self.config[key]['default'] = default
def bool_(val):
    """Coerce *val* to bool, treating the string 'False' (any case) as False."""
    is_false_string = (isinstance(val, six.string_types) and
                       val.lower() == 'false')
    return False if is_false_string else bool(val)
# Define configuration options.
# Each registration declares a supported setting, its type coercion,
# default, and (where applicable) its environment-variable override;
# the YAML file is merged last so unrecognized keys in it are rejected.
config = Configuration()
config.add_config('default_seed', type_=int, default=1)
config.add_config('recursion_limit', type_=int, default=10000)
config.add_config('profile', type_=bool_, default=False,
                  env_var='BLOCKS_PROFILE')
config.add_config('log_backend', type_=str, default='python')
config.add_config('sqlite_database', type_=str,
                  default=os.path.expanduser('~/blocks_log.sqlite'),
                  env_var='BLOCKS_SQLITEDB')
config.add_config('max_blob_size', type_=int, default=4096)
config.load_yaml()
| {
"content_hash": "4797e860f9ea27e813952013bf5c3989",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 76,
"avg_line_length": 35.9364161849711,
"alnum_prop": 0.6459707254302718,
"repo_name": "nke001/attention-lvcsr",
"id": "dceee53a52caa0ce37de6195d1161005759ea405",
"size": "6217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/blocks/blocks/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1288"
},
{
"name": "C",
"bytes": "115643"
},
{
"name": "C++",
"bytes": "48644"
},
{
"name": "Cuda",
"bytes": "529630"
},
{
"name": "Gnuplot",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "617"
},
{
"name": "Makefile",
"bytes": "973"
},
{
"name": "Python",
"bytes": "8713080"
},
{
"name": "Shell",
"bytes": "34244"
},
{
"name": "TeX",
"bytes": "102624"
}
],
"symlink_target": ""
} |
import six
from django.utils.encoding import force_text
# Newer Django provides keep_lazy; older versions only have allow_lazy.
# Remember which one is available so the wrapping below can adapt.
try:
    from django.utils.functional import keep_lazy
    KEEP_LAZY = True
except ImportError:
    from django.utils.functional import allow_lazy
    KEEP_LAZY = False
def truncate_letters(s, num):
    """
    Truncate a string to at most *num* letters, appending '...' when cut
    (the per-character analogue of truncate_words).
    """
    text = force_text(s)
    limit = int(num)
    if len(text) <= limit:
        return text
    text = text[:limit]
    return text if text.endswith('...') else text + '...'
# Make truncate_letters usable on lazy (translation) strings, using
# whichever lazy-wrapping helper this Django version provides.
if KEEP_LAZY:
    truncate_letters = keep_lazy(six.text_type)(truncate_letters)
else:
    truncate_letters = allow_lazy(truncate_letters, six.text_type)
| {
"content_hash": "e9da889cb3e740baf47d31b45963c98b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 72,
"avg_line_length": 24.51851851851852,
"alnum_prop": 0.6450151057401813,
"repo_name": "kawamon/hue",
"id": "17825807d4e76a95974defffb822df406d6f48fc",
"size": "686",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/django-extensions-1.8.0/django_extensions/utils/text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
from optparse import OptionParser
import os
import os.path
import sys
import shutil
import stat
def _nsinstall_internal(argv):
  """Python replacement for the Mozilla ``nsinstall`` build tool.

  Copies files or creates directories according to the command-line
  options in *argv* (which does NOT include the program name).  Returns
  a shell-style exit code: 0 on success, 1 on failure.

  NOTE(review): this is Python 2 code (``except Exception, e``,
  ``print >>``) and will not run under Python 3 as-is.
  """
  usage = "usage: %prog [options] arg1 [arg2 ...] target-directory"
  p = OptionParser(usage=usage)

  p.add_option('-D', action="store_true",
               help="Create a single directory only")
  p.add_option('-t', action="store_true",
               help="Preserve time stamp")
  p.add_option('-m', action="store",
               help="Set mode", metavar="mode")
  p.add_option('-d', action="store_true",
               help="Create directories in target")
  p.add_option('-R', action="store_true",
               help="Use relative symbolic links (ignored)")
  p.add_option('-L', action="store", metavar="linkprefix",
               help="Link prefix (ignored)")
  p.add_option('-X', action="append", metavar="file",
               help="Ignore a file when installing a directory recursively.")

  # The remaining arguments are not used in our tree, thus they're not
  # implemented.
  def BadArg(option, opt, value, parser):
    # optparse callback that rejects unsupported legacy nsinstall flags.
    parser.error('option not supported: %s' % opt)

  p.add_option('-C', action="callback", metavar="CWD",
               callback=BadArg,
               help="NOT SUPPORTED")
  p.add_option('-o', action="callback", callback=BadArg,
               help="Set owner (NOT SUPPORTED)", metavar="owner")
  p.add_option('-g', action="callback", callback=BadArg,
               help="Set group (NOT SUPPORTED)", metavar="group")

  (options, args) = p.parse_args(argv)

  if options.m:
    # mode is specified: parse the chmod-style octal string into an int.
    try:
      options.m = int(options.m, 8)
    except:
      sys.stderr.write('nsinstall: ' + options.m + ' is not a valid mode\n')
      return 1

  # just create one directory?
  def maybe_create_dir(dir, mode, try_again):
    # Create *dir* (and parents); apply *mode* if given.  Returns 0/1.
    dir = os.path.abspath(dir)
    if os.path.exists(dir):
      if not os.path.isdir(dir):
        print >> sys.stderr, ('nsinstall: %s is not a directory' % dir)
        return 1
      if mode:
        os.chmod(dir, mode)
      return 0

    try:
      if mode:
        os.makedirs(dir, mode)
      else:
        os.makedirs(dir)
    except Exception, e:
      # We might have hit EEXIST due to a race condition (see bug 463411) -- try again once
      if try_again:
        return maybe_create_dir(dir, mode, False)
      print >> sys.stderr, ("nsinstall: failed to create directory %s: %s" % (dir, e))
      return 1
    else:
      return 0

  if options.X:
    # Normalize exclusion paths once, for comparison in copy_all_entries.
    options.X = [os.path.abspath(p) for p in options.X]

  if options.D:
    return maybe_create_dir(args[0], options.m, True)

  # nsinstall arg1 [...] directory
  if len(args) < 2:
    p.error('not enough arguments')

  def copy_all_entries(entries, target):
    # Copy (or mkdir, per handleTarget) each entry into *target*,
    # honoring -X exclusions and applying the -m mode afterwards.
    for e in entries:
      e = os.path.abspath(e)
      if options.X and e in options.X:
        continue

      dest = os.path.join(target, os.path.basename(e))
      dest = os.path.abspath(dest)
      handleTarget(e, dest)
      if options.m:
        os.chmod(dest, options.m)

  # set up handler
  if options.d:
    # we're supposed to create directories
    def handleTarget(srcpath, targetpath):
      # target directory was already created, just use mkdir
      os.mkdir(targetpath)
  else:
    # we're supposed to copy files
    def handleTarget(srcpath, targetpath):
      if os.path.isdir(srcpath):
        if not os.path.exists(targetpath):
          os.mkdir(targetpath)
        entries = [os.path.join(srcpath, e) for e in os.listdir(srcpath)]
        copy_all_entries(entries, targetpath)
        # options.t is not relevant for directories
        if options.m:
          os.chmod(targetpath, options.m)
      else:
        if os.path.exists(targetpath):
          # On Windows, read-only files can't be deleted
          os.chmod(targetpath, stat.S_IWUSR)
          os.remove(targetpath)
        if options.t:
          # -t: preserve time stamps of the copied file.
          shutil.copy2(srcpath, targetpath)
        else:
          shutil.copy(srcpath, targetpath)

  # the last argument is the target directory
  target = args.pop()
  # ensure target directory (importantly, we do not apply a mode to the directory
  # because we want to copy files into it and the mode might be read-only)
  rv = maybe_create_dir(target, None, True)
  if rv != 0:
    return rv

  copy_all_entries(args, target)
  return 0
# nsinstall as a native command is always UTF-8
def nsinstall(argv):
  """Entry point used when nsinstall is invoked as a native command.

  Arguments arrive as UTF-8 byte strings; decode them before delegating
  to the internal implementation.
  """
  return _nsinstall_internal([unicode(arg, "utf-8") for arg in argv])
if __name__ == '__main__':
  # sys.argv corrupts characters outside the system code page on Windows
  # <http://bugs.python.org/issue2128>. Use ctypes instead. This is also
  # useful because switching to Unicode strings makes python use the wide
  # Windows APIs, which is what we want here since the wide APIs normally do a
  # better job at handling long paths and such.
  if sys.platform == "win32":
    import ctypes
    from ctypes import wintypes
    # Fetch the raw wide-character command line and split it ourselves.
    GetCommandLine = ctypes.windll.kernel32.GetCommandLineW
    GetCommandLine.argtypes = []
    GetCommandLine.restype = wintypes.LPWSTR

    CommandLineToArgv = ctypes.windll.shell32.CommandLineToArgvW
    CommandLineToArgv.argtypes = [wintypes.LPWSTR, ctypes.POINTER(ctypes.c_int)]
    CommandLineToArgv.restype = ctypes.POINTER(wintypes.LPWSTR)

    argc = ctypes.c_int(0)
    argv_arr = CommandLineToArgv(GetCommandLine(), ctypes.byref(argc))
    # The first argv will be "python", the second will be the .py file
    argv = argv_arr[1:argc.value]
  else:
    # For consistency, do it on Unix as well
    if sys.stdin.encoding is not None:
      argv = [unicode(arg, sys.stdin.encoding) for arg in sys.argv]
    else:
      argv = [unicode(arg) for arg in sys.argv]

  sys.exit(_nsinstall_internal(argv[1:]))
| {
"content_hash": "2cacf4ea254a013c6af08851bb7171ec",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 91,
"avg_line_length": 34.09580838323353,
"alnum_prop": 0.6413768879522305,
"repo_name": "sergecodd/FireFox-OS",
"id": "b0a81d17d983a14542182c439c64efd9b2384668",
"size": "6259",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "B2G/gecko/js/src/config/nsinstall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "443"
},
{
"name": "ApacheConf",
"bytes": "85"
},
{
"name": "Assembly",
"bytes": "5123438"
},
{
"name": "Awk",
"bytes": "46481"
},
{
"name": "Batchfile",
"bytes": "56250"
},
{
"name": "C",
"bytes": "101720951"
},
{
"name": "C#",
"bytes": "38531"
},
{
"name": "C++",
"bytes": "148896543"
},
{
"name": "CMake",
"bytes": "23541"
},
{
"name": "CSS",
"bytes": "2758664"
},
{
"name": "DIGITAL Command Language",
"bytes": "56757"
},
{
"name": "Emacs Lisp",
"bytes": "12694"
},
{
"name": "Erlang",
"bytes": "889"
},
{
"name": "FLUX",
"bytes": "34449"
},
{
"name": "GLSL",
"bytes": "26344"
},
{
"name": "Gnuplot",
"bytes": "710"
},
{
"name": "Groff",
"bytes": "447012"
},
{
"name": "HTML",
"bytes": "43343468"
},
{
"name": "IDL",
"bytes": "1455122"
},
{
"name": "Java",
"bytes": "43261012"
},
{
"name": "JavaScript",
"bytes": "46646658"
},
{
"name": "Lex",
"bytes": "38358"
},
{
"name": "Logos",
"bytes": "21054"
},
{
"name": "Makefile",
"bytes": "2733844"
},
{
"name": "Matlab",
"bytes": "67316"
},
{
"name": "Max",
"bytes": "3698"
},
{
"name": "NSIS",
"bytes": "421625"
},
{
"name": "Objective-C",
"bytes": "877657"
},
{
"name": "Objective-C++",
"bytes": "737713"
},
{
"name": "PHP",
"bytes": "17415"
},
{
"name": "Pascal",
"bytes": "6780"
},
{
"name": "Perl",
"bytes": "1153180"
},
{
"name": "Perl6",
"bytes": "1255"
},
{
"name": "PostScript",
"bytes": "1139"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Protocol Buffer",
"bytes": "26553"
},
{
"name": "Python",
"bytes": "8453201"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3481"
},
{
"name": "Ruby",
"bytes": "5116"
},
{
"name": "Scilab",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "3383832"
},
{
"name": "SourcePawn",
"bytes": "23661"
},
{
"name": "TeX",
"bytes": "879606"
},
{
"name": "WebIDL",
"bytes": "1902"
},
{
"name": "XSLT",
"bytes": "13134"
},
{
"name": "Yacc",
"bytes": "112744"
}
],
"symlink_target": ""
} |
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html '''
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import re, string
from sklearn.feature_extraction.text import CountVectorizer
def load_text():
    """Load the Oxford English Dictionary text file as bag-of-words arrays.

    Each usable line is "<word> <definition>".  Returns a pair of dense
    arrays ``(X, y)`` of identical shape, where row i of X is the
    bag-of-words vector of the i-th head word and row i of y that of
    its definition, over a shared vocabulary.
    """
    fname = 'Oxford_English_Dictionary.txt'
    txt = []
    with open(fname) as f:
        txt = f.readlines()

    # Strip whitespace and everything that is not a letter or a space;
    # keep only lines that had more than one character.
    txt = [x.strip() for x in txt]
    txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]

    # Split each line into head word and definition.  str.partition never
    # raises when the cleaned line contains no space, unlike the previous
    # split(' ', 1)[1], which crashed (IndexError) on a word with an
    # empty definition.
    word_list = []
    def_list = []
    for line in txt:
        word, _, definition = line.partition(' ')
        word_list.append(word.strip())
        def_list.append(definition.strip())

    # Initialize the "CountVectorizer" object, which is scikit-learn's
    # bag of words tool; the token pattern keeps single-character words.
    vectorizer = CountVectorizer(analyzer="word",
                                 tokenizer=None,
                                 preprocessor=None,
                                 stop_words=None,
                                 max_features=None,
                                 token_pattern='\\b\\w+\\b')
    # Shared vocabulary over head words and definitions together.
    vectorizer.fit(word_list + def_list)

    X = vectorizer.transform(word_list).toarray()
    y = vectorizer.transform(def_list).toarray()
    return X, y
def xavier_init(fan_in, fan_out, constant=1):
    """Xavier/Glorot uniform initializer for a (fan_in, fan_out) weight matrix.

    Samples uniformly from [-b, b] with b = constant * sqrt(6 / (fan_in + fan_out)).
    """
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-bound, maxval=bound,
                             dtype=tf.float32)
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
    def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
                 learning_rate=0.001, batch_size=100):
        """Build the VAE graph and start an interactive TF session.

        Parameters
        ----------
        network_architecture : dict
            Layer sizes: n_hidden_recog_1/2, n_hidden_gener_1/2,
            n_input, n_z (latent dimensionality).
        transfer_fct : callable
            Activation function for the hidden layers.
        learning_rate : float
            Adam learning rate.
        batch_size : int
            Fixed batch size; the latent sampling noise is created with
            this static shape, so fed batches must match it exactly.
        """
        self.network_architecture = network_architecture
        self.transfer_fct = transfer_fct
        self.learning_rate = learning_rate
        self.batch_size = batch_size

        # tf Graph input: x is the encoder input, y the reconstruction
        # target used by the loss — both of width n_input.
        self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])
        self.y = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])

        # Create autoencoder network
        self._create_network()
        # Define loss function based variational upper-bound and
        # corresponding optimizer
        self._create_loss_optimizer()

        # Initializing the tensor flow variables
        init = tf.global_variables_initializer()

        # Launch the session
        self.sess = tf.InteractiveSession()
        self.sess.run(init)
    def _create_network(self):
        """Wire encoder, latent sampling, and decoder into one graph."""
        # Initialize autoencode network weights and biases
        network_weights = self._initialize_weights(**self.network_architecture)

        # Use recognition network to determine mean and
        # (log) variance of Gaussian distribution in latent
        # space
        self.z_mean, self.z_log_sigma_sq = \
            self._recognition_network(network_weights["weights_recog"],
                                      network_weights["biases_recog"])

        # Draw one sample z from Gaussian distribution
        n_z = self.network_architecture["n_z"]
        eps = tf.random_normal((self.batch_size, n_z), 0, 1,
                               dtype=tf.float32)
        # z = mu + sigma*epsilon (reparameterization trick)
        # NOTE(review): tf.mul is a pre-1.0 TensorFlow name (tf.multiply
        # in later versions) — this file targets an old TF release.
        self.z = tf.add(self.z_mean,
                        tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        # Use generator to determine mean of
        # Bernoulli distribution of reconstructed input
        self.x_reconstr_mean = \
            self._generator_network(network_weights["weights_gener"],
                                    network_weights["biases_gener"])
def _initialize_weights(self, n_hidden_recog_1, n_hidden_recog_2,
n_hidden_gener_1, n_hidden_gener_2,
n_input, n_z):
all_weights = dict()
all_weights['weights_recog'] = {
'h1': tf.Variable(xavier_init(n_input, n_hidden_recog_1)),
'h2': tf.Variable(xavier_init(n_hidden_recog_1, n_hidden_recog_2)),
'out_mean': tf.Variable(xavier_init(n_hidden_recog_2, n_z)),
'out_log_sigma': tf.Variable(xavier_init(n_hidden_recog_2, n_z))}
all_weights['biases_recog'] = {
'b1': tf.Variable(tf.zeros([n_hidden_recog_1], dtype=tf.float32)),
'b2': tf.Variable(tf.zeros([n_hidden_recog_2], dtype=tf.float32)),
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32)),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32))}
all_weights['weights_gener'] = {
'h1': tf.Variable(xavier_init(n_z, n_hidden_gener_1)),
'h2': tf.Variable(xavier_init(n_hidden_gener_1, n_hidden_gener_2)),
'out_mean': tf.Variable(xavier_init(n_hidden_gener_2, n_input)),
'out_log_sigma': tf.Variable(xavier_init(n_hidden_gener_2, n_input))}
all_weights['biases_gener'] = {
'b1': tf.Variable(tf.zeros([n_hidden_gener_1], dtype=tf.float32)),
'b2': tf.Variable(tf.zeros([n_hidden_gener_2], dtype=tf.float32)),
'out_mean': tf.Variable(tf.zeros([n_input], dtype=tf.float32)),
'out_log_sigma': tf.Variable(tf.zeros([n_input], dtype=tf.float32))}
return all_weights
def _recognition_network(self, weights, biases):
# Generate probabilistic encoder (recognition network), which
# maps inputs onto a normal distribution in latent space.
# The transformation is parametrized and can be learned.
layer_1 = self.transfer_fct(tf.add(tf.matmul(self.x, weights['h1']),
biases['b1']))
layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
biases['b2']))
z_mean = tf.add(tf.matmul(layer_2, weights['out_mean']),
biases['out_mean'])
z_log_sigma_sq = \
tf.add(tf.matmul(layer_2, weights['out_log_sigma']),
biases['out_log_sigma'])
return (z_mean, z_log_sigma_sq)
def _generator_network(self, weights, biases):
# Generate probabilistic decoder (decoder network), which
# maps points in latent space onto a Bernoulli distribution in data space.
# The transformation is parametrized and can be learned.
layer_1 = self.transfer_fct(tf.add(tf.matmul(self.z, weights['h1']),
biases['b1']))
layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
biases['b2']))
x_reconstr_mean = \
tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['out_mean']),
biases['out_mean']))
return x_reconstr_mean
def _create_loss_optimizer(self):
    """Define the VAE cost and its Adam optimizer.

    Sets ``self.cost`` (scalar, averaged over the batch) and
    ``self.optimizer`` (an Adam minimize op).  The loss is the usual
    ELBO decomposition into two terms.
    """
    # 1) Reconstruction term: negative log-probability of the target
    #    under the Bernoulli distribution induced by the decoder -- the
    #    number of "nats" needed to reconstruct the input given the
    #    latent activation.  The 1e-10 offset avoids log(0.0).
    eps = 1e-10
    reconstr_loss = -tf.reduce_sum(
        self.y * tf.log(eps + self.x_reconstr_mean)
        + (1 - self.y) * tf.log(eps + 1 - self.x_reconstr_mean),
        1)
    # 2) Latent term: Kullback-Leibler divergence between the encoder's
    #    Gaussian and the prior; acts as a regularizer -- the number of
    #    "nats" needed to transmit the latent distribution given the
    #    prior.
    latent_loss = -0.5 * tf.reduce_sum(
        1 + self.z_log_sigma_sq
        - tf.square(self.z_mean)
        - tf.exp(self.z_log_sigma_sq),
        1)
    # Average the combined loss over the mini-batch.
    self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
    # Use the ADAM optimizer.
    self.optimizer = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate).minimize(self.cost)
def partial_fit(self, X):
    """Train the model on one mini-batch and return its cost.

    NOTE(review): the target placeholder ``self.y`` is fed from the
    module-level global ``y``, not from an argument -- confirm this is
    intentional before reusing this class elsewhere.
    """
    feed = {self.x: X, self.y: y}
    _, batch_cost = self.sess.run((self.optimizer, self.cost),
                                  feed_dict=feed)
    return batch_cost
def transform(self, X):
    """Map data into the latent space (returns latent means).

    Note: this returns the mean of the latent distribution; one could
    alternatively sample from the Gaussian.  The target placeholder is
    fed from the module-level global ``y``.
    """
    feed = {self.x: X, self.y: y}
    return self.sess.run(self.z_mean, feed_dict=feed)
def generate(self, z_mu=None):
    """Generate data by decoding a point in latent space.

    If ``z_mu`` is given, that latent point is decoded; otherwise a
    point is drawn from the standard-normal prior.  Returns the mean
    of the output distribution (no output-side sampling).
    """
    if z_mu is None:
        z_mu = np.random.normal(size=self.network_architecture["n_z"])
    feed = {self.z: z_mu}
    return self.sess.run(self.x_reconstr_mean, feed_dict=feed)
def reconstruct(self, X):
    """Use the VAE to reconstruct the given data.

    BUG FIX: the original fed the target placeholder from ``Y``, which
    is never defined (the module-level global is lowercase ``y``, as
    used by partial_fit() and transform()), so this method raised
    NameError whenever called.
    """
    return self.sess.run(self.x_reconstr_mean,
                         feed_dict={self.x: X, self.y: y})
def train(network_architecture, learning_rate=0.001,
          batch_size=100, training_epochs=10, display_step=5):
    """Train a VariationalAutoencoder and return the fitted model.

    Relies on the module-level globals ``X`` (training data) and
    ``n_samples``.  Prints the average cost every ``display_step``
    epochs.
    """
    vae = VariationalAutoencoder(network_architecture,
                                 learning_rate=learning_rate,
                                 batch_size=batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(n_samples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            # NOTE(review): this feeds the FULL dataset (X[:n_samples])
            # on every iteration instead of the i-th mini-batch, and
            # partial_fit() feeds the full global ``y`` -- the loop only
            # behaves as intended when batch_size == n_samples (as in
            # __main__, where both are 500).  Confirm before using other
            # batch sizes.
            batch_xs = X[:n_samples, :]
            # Fit training using batch data
            cost = vae.partial_fit(batch_xs)
            # Compute average loss
            avg_cost += cost / n_samples * batch_size
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1),
                  "cost=", "{:.9f}".format(avg_cost))
    return vae
if __name__ == "__main__":
    # NOTE(review): load_text() and plt (presumably matplotlib.pyplot)
    # must be imported/defined earlier in this file -- not visible in
    # this chunk; confirm.
    X, y = load_text()
    n_input = X.shape[1]
    # Keep only the first 500 samples for this experiment.
    n_samples = 500
    X, y = X[:n_samples, :], y[:n_samples, :]
    network_architecture = \
        dict(n_hidden_recog_1=500,  # 1st layer encoder neurons
             n_hidden_recog_2=500,  # 2nd layer encoder neurons
             n_hidden_gener_1=500,  # 1st layer decoder neurons
             n_hidden_gener_2=500,  # 2nd layer decoder neurons
             n_input=n_input,  # One hot encoding input
             n_z=20)  # dimensionality of latent space
    # batch_size == n_samples, so each epoch runs a single full-data batch
    # (see the review note on train()).
    vae_2d = train(network_architecture, training_epochs=75, batch_size=500)
    x_sample = X
    y_sample = y
    # Project samples into latent space and scatter-plot the first two
    # latent dimensions, coloured by the argmax class label.
    z_mu = vae_2d.transform(x_sample)
    plt.figure(figsize=(8, 6))
    plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
    plt.colorbar()
    plt.grid()
    plt.show()
| {
"content_hash": "0f39d4bc7e31a36902cfe30a304f325f",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 96,
"avg_line_length": 43.77090909090909,
"alnum_prop": 0.5739802276314696,
"repo_name": "dricciardelli/vae2vec",
"id": "5dae53d12c27089378a2a6798922c75c98cd6cda",
"size": "12037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VAE_sandbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "85156"
},
{
"name": "Python",
"bytes": "1292042"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
"""A time estimator by running TensorFlow operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import tensorflow as tf
import numpy as np
from six.moves import range
from paleo.profilers.base import BaseProfiler, TimeMeasure
class TensorFlowProfiler(BaseProfiler):
    """Estimates layer execution time by actually running TF ops.

    Each ``_ops_*`` builder constructs the forward (and, for conv2d,
    backward) ops for one layer type inside a given graph; ``_execute``
    times them in a fresh session on ``self._device``.
    """

    def __init__(self, options, device='/gpu:0'):
        # device: TF device string the ops are pinned to.
        super(TensorFlowProfiler, self).__init__('TensorFlowProfiler', options)
        self._device = device
        self._logger.info('TensorFlow version: %s' % tf.__version__)

    def profile(self, layer):
        """Build and time the ops for a single layer in isolation.

        Unsupported layer types are logged and executed as no-ops
        (ops stays None, so _execute returns an empty TimeMeasure).
        """
        graph = tf.Graph()
        ops, bwd_ops = None, None
        if layer.layertype == 'conv2d':
            ops, bwd_ops = self._ops_conv2d(layer, graph)
        elif layer.layertype == 'innerproduct':
            ops, bwd_ops = self._ops_innerproduct(layer, graph)
        elif layer.layertype == 'pool2d':
            ops, bwd_ops = self._ops_pool2d(layer, graph)
        elif layer.layertype == 'dropout':
            ops, bwd_ops = self._ops_dropout(layer, graph)
        elif layer.layertype == 'concat':
            ops, bwd_ops = self._ops_concat(layer, graph)
        elif layer.layertype == 'reshape':
            ops, bwd_ops = self._ops_reshape(layer, graph)
        else:
            self._logger.warning('Unimplemented \'%s\'' % layer.layertype)
        return self._execute(ops, bwd_ops, graph)

    def profile_full_pass(self, layers):
        """Time forward, loss, and backward passes over a whole network.

        Returns (forward_time, softmax_time, backward_time); the loss
        and backward times are derived by subtraction, since each
        _execute call runs the graph up to a different endpoint.
        """
        graph, end_points, variables = self._compose_full_graph(layers)
        # Forward pass.
        if layers[-1].layertype in ['softmax', 'sigmoid']:
            last_op = end_points[layers[-2].name]
            loss_op = end_points[layers[-1].name]
        else:
            last_op = end_points[layers[-1].name]
            loss_op = None
        forward_time = self._execute(last_op, None, graph)
        # Backward pass.
        softmax_time = TimeMeasure()
        backward_time = TimeMeasure()
        if loss_op is not None:
            softmax_time = self._execute(loss_op, None, graph)
            with graph.as_default():
                grad_op = tf.gradients(loss_op, variables)
            backward_time = self._execute(grad_op, None, graph)
            # Isolate each phase: grad run includes loss; loss run
            # includes forward.
            backward_time = backward_time - softmax_time
            softmax_time = softmax_time - forward_time
        return forward_time, softmax_time, backward_time

    def _compose_full_graph(self, layers):
        """Chain per-layer ops into one graph.

        Returns (graph, end_points, variables) where end_points maps
        layer name -> output tensor and variables collects trainable
        Variables (conv filters / fc weights) for tf.gradients.
        """
        graph = tf.Graph()
        end_points = dict()  # collects out tensors for each layer
        variables = [None]  # collects trainable variables
        for layer in layers:
            if layer.layertype == 'conv2d':
                ops, _ = self._ops_conv2d(layer, graph, end_points, variables)
            elif layer.layertype == 'deconv2d':
                ops, _ = self._ops_deconv2d(layer, graph, end_points,
                                            variables)
            elif layer.layertype == 'innerproduct':
                ops, _ = self._ops_innerproduct(layer, graph, end_points,
                                                variables)
            elif layer.layertype == 'pool2d':
                ops, _ = self._ops_pool2d(layer, graph, end_points)
            elif layer.layertype == 'upsampling2d':
                ops, _ = self._ops_upsampling2d(layer, graph, end_points)
            elif layer.layertype == 'dropout':
                ops, _ = self._ops_dropout(layer, graph, end_points)
            elif layer.layertype == 'concat':
                ops, _ = self._ops_concat(layer, graph, end_points)
            elif layer.layertype == 'reshape':
                ops, _ = self._ops_reshape(layer, graph, end_points)
            elif layer.layertype == 'softmax':
                ops, _ = self._ops_softmax(layer, graph, end_points)
            elif layer.layertype == 'sigmoid':
                ops, _ = self._ops_sigmoid(layer, graph, end_points)
            elif layer.layertype == 'input':
                # skip data/input layer.
                continue
            else:
                raise NotImplementedError('Cannot create ops for layer %s [%s]'
                                          % (layer.name, layer.layertype))
            end_points[layer.name] = ops
        # Drop the leading None placeholder.
        return graph, end_points, variables[1:]

    def _get_inputs(self, layer, end_points=None):
        """Return the layer's input tensor(s).

        Isolation mode (no end_points, or parent is 'data'): a random
        constant of the layer's input shape.  Chain mode: the parent
        layers' outputs (a single tensor when there is one parent).
        """
        if end_points is None or layer.parents[0] == 'data':
            # Isolation mode: inputs for the layer are random constants.
            inputs = tf.constant(
                2 * np.random.random_sample(layer.inputs) - 1,
                dtype=tf.float32,
                name="fake_inputs")
            return inputs
        else:
            # Chain mode: get inputs from parent layer outputs.
            inputs = [end_points[p] for p in layer.parents]
            if len(inputs) == 1:
                return inputs[0]
            return inputs

    def _get_variable(self, shape, name='constant'):
        """Create a trainable Variable of `shape` with random-normal init."""
        return tf.Variable(
            tf.truncated_normal(
                shape, dtype=tf.float32, stddev=1e-1),
            name='rand_{}'.format(name))

    def _get_fake_targets(self, batch_size, num_classes):
        """Random integer class labels for the loss ops."""
        labels = np.random.randint(0, num_classes, batch_size)
        return tf.constant(labels, dtype=tf.int32, name='fake_targets')

    def _ops_conv2d(self, layer, graph, end_points=None, variables=None):
        """Conv2d forward op plus (optionally) its backprop ops.

        Only one direction is built, per self.options.direction; the
        backward ops use explicit conv2d_backprop_input/filter so data-
        vs filter-gradients can be timed separately.
        """
        with graph.as_default():
            with tf.device(self._device):
                inputs = self._get_inputs(layer, end_points)
                filters = self._get_variable(layer.filters, name='filters')
                if variables:
                    variables.append(filters)
                conv = None
                if self.options.direction == 'forward':
                    conv = tf.nn.conv2d(
                        inputs, filters, layer.strides, padding=layer.padding)
                bwd_inputs_op, bwd_filter_op = None, None
                if self.options.direction == 'backward':
                    if self.options.gradient_wrt == 'data' and layer.backprop:
                        bwd_inputs_op = tf.nn.conv2d_backprop_input(
                            layer.inputs,
                            filters,
                            self._get_variable(
                                layer.outputs, name='outputs'),
                            layer.strides,
                            layer.padding)
                    elif self.options.gradient_wrt == 'filter':
                        bwd_filter_op = tf.nn.conv2d_backprop_filter(
                            inputs, layer.filters,
                            self._get_variable(layer.outputs, 'outputs'),
                            layer.strides, layer.padding)
        return conv, [bwd_inputs_op, bwd_filter_op]

    def _ops_deconv2d(self, layer, graph, end_points=None, variables=None):
        """Transposed-convolution forward op (no explicit backward ops)."""
        with graph.as_default():
            with tf.device(self._device):
                inputs = self._get_inputs(layer, end_points)
                filters = self._get_variable(layer.filters, name='filters')
                if variables:
                    variables.append(filters)
                deconv = tf.nn.conv2d_transpose(
                    inputs,
                    filters,
                    output_shape=layer.outputs,
                    strides=layer.strides)
        return deconv, None

    def _ops_innerproduct(self, layer, graph, end_points=None, variables=None):
        """Fully-connected (matmul) forward op."""
        with graph.as_default():
            with tf.device(self._device):
                inputs = self._get_inputs(layer, end_points)
                weights = self._get_variable(layer.weights, name='weights')
                if variables:
                    variables.append(weights)
                innerprod = tf.matmul(inputs, weights)
        return innerprod, None

    def _ops_pool2d(self, layer, graph, end_points=None):
        """Max or average pooling forward op, per layer.pool_type."""
        with graph.as_default():
            with tf.device(self._device):
                inputs = self._get_inputs(layer, end_points)
                if layer.pool_type == 'max':
                    pool_op = tf.nn.max_pool
                elif layer.pool_type == 'avg':
                    pool_op = tf.nn.avg_pool
                else:
                    raise NotImplementedError('Invalid pool type: %s' %
                                              layer.pool_type)
                pool = pool_op(
                    inputs, layer.kernel, layer.strides, padding=layer.padding)
        return pool, None

    def _ops_upsampling2d(self, layer, graph, end_points=None):
        """Nearest-neighbor resize to the layer's spatial output size."""
        with graph.as_default():
            with tf.device(self._device):
                inputs = self._get_inputs(layer, end_points)
                upsampling = tf.image.resize_nearest_neighbor(
                    inputs, layer.outputs[1:3])
        return upsampling, None

    def _ops_dropout(self, layer, graph, end_points=None):
        """Dropout forward op with the layer's keep probability."""
        with graph.as_default():
            with tf.device(self._device):
                inputs = self._get_inputs(layer, end_points)
                dropout = tf.nn.dropout(inputs, layer.keep_prob)
        return dropout, None

    def _ops_concat(self, layer, graph, end_points=None):
        """Concatenation along layer.dim; random inputs in isolation mode."""
        with graph.as_default():
            with tf.device(self._device):
                if end_points:
                    inputs = self._get_inputs(layer, end_points)
                else:
                    inputs = [tf.Variable(tf.random_normal(inp))
                              for inp in layer.inputs]
                concat = tf.concat(layer.dim, inputs)
        return concat, None

    def _ops_reshape(self, layer, graph, end_points=None):
        """Reshape to the layer's output shape."""
        with graph.as_default():
            with tf.device(self._device):
                inputs = self._get_inputs(layer, end_points)
                reshape = tf.reshape(inputs, layer.outputs)
        return reshape, None

    def _ops_softmax(self, layer, graph, end_points=None):
        # For simplicity, here combine softmax and loss
        with graph.as_default():
            with tf.device(self._device):
                inputs = self._get_inputs(layer, end_points)
                loss = tf.reduce_mean(
                    tf.nn.sparse_softmax_cross_entropy_with_logits(
                        tf.squeeze(inputs), self._get_fake_targets(
                            layer.outputs[0], layer.outputs[1])))
        return loss, None

    def _ops_sigmoid(self, layer, graph, end_points=None):
        """Sigmoid cross-entropy loss against an all-zeros target."""
        with graph.as_default():
            with tf.device(self._device):
                inputs = self._get_inputs(layer, end_points)
                loss = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(inputs, tf.zeros(
                        layer.outputs)))
        return loss, None

    def _execute(self, layer_ops, bwd_ops, graph):
        """Run the selected ops and return the mean wall-clock TimeMeasure.

        A tf.group on tf.shape(op) forces execution without fetching the
        tensor contents.  Warmup iterations are discarded; times are in
        milliseconds.  Graph optimizations are disabled (L0) so the ops
        are not rewritten away.
        """
        with graph.as_default():
            with tf.device(self._device):
                config = tf.ConfigProto(
                    allow_soft_placement=False,
                    log_device_placement=(
                        self._logger.getEffectiveLevel() == logging.DEBUG),
                    graph_options=tf.GraphOptions(
                        optimizer_options=tf.OptimizerOptions(
                            opt_level=tf.OptimizerOptions.L0)))
                ops_to_run = None
                if self.options.direction == 'forward':
                    if layer_ops is None:
                        return TimeMeasure()
                    if isinstance(layer_ops, list):
                        target_fwd_op = [tf.group(op) for op in layer_ops]
                    else:
                        shape = tf.shape(layer_ops)
                        target_fwd_op = tf.group(shape)
                    ops_to_run = target_fwd_op
                elif self.options.direction == 'backward':
                    if bwd_ops is None:
                        return TimeMeasure()
                    else:
                        if self.options.gradient_wrt == 'data':
                            target = bwd_ops[0]
                        elif self.options.gradient_wrt == 'filter':
                            target = bwd_ops[1]
                        else:
                            self._logger.warning(
                                'TensorFlowProfiler cannot run two'
                                'backward ops for now.')
                            return TimeMeasure()
                        if target is None:
                            return TimeMeasure()
                        target_bwd_op = tf.group(tf.shape(target))
                        ops_to_run = target_bwd_op
                init = tf.initialize_all_variables()
                # Create a session and initialize variables.
                with tf.Session(config=config) as sess:
                    # writer = tf.train.SummaryWriter('logs/', sess.graph)
                    sess.run(init)
                    # Run the ops.
                    durations = []
                    for i in range(self.options.num_warmup +
                                   self.options.num_iter):
                        start_time = time.time()
                        sess.run(ops_to_run)
                        duration = time.time() - start_time
                        if i >= self.options.num_warmup:
                            # Mesure time in milliseconds.
                            durations.append(duration * (10**3))
                    mean_time = np.mean(durations)
        tf.reset_default_graph()
        return TimeMeasure(total_time=mean_time)
| {
"content_hash": "ddb9e806c67698c5c84f6e7b074fd33d",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 79,
"avg_line_length": 42.465625,
"alnum_prop": 0.514386636249908,
"repo_name": "TalwalkarLab/paleo",
"id": "9fcb057df310dca264aa9276118dcd84fb8fef5c",
"size": "13589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paleo/profilers/tensorflow_profiler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "127"
},
{
"name": "Python",
"bytes": "155511"
},
{
"name": "Shell",
"bytes": "5739"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.conf import settings
from django.core.files import File
from protein.models import ProteinFamily, ProteinAlias, ProteinSet, Protein, ProteinSegment
from common.views import AbsTargetSelection
from common.views import AbsSegmentSelection
from common.views import AbsMiscSelection
from common.selection import SelectionItem
from mutation.models import *
import math
import os, shutil, subprocess, signal
import uuid
from phylogenetic_trees.PrepareTree import *
from collections import OrderedDict
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
def kill_phylo():  # FIXME, needs better way of handling this!
    """Forcefully kill any running phylip 'protdist' process.

    Scans ``ps -A`` output and sends SIGKILL to every PID whose line
    mentions 'protdist'.  Used as a last resort when a phylip
    subprocess times out.
    """
    ps = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
    stdout, _ = ps.communicate()
    matching = [ln for ln in stdout.splitlines() if 'protdist' in str(ln)]
    for ln in matching:
        pid = int(ln.split(None, 1)[0])
        os.kill(pid, signal.SIGKILL)
class TargetSelection(AbsTargetSelection):
    """Step 1 of 3 in the phylogenetic-tree workflow: pick targets."""
    step = 1
    number_of_steps = 3
    docs = 'sequences.html#phylogeneric-trees'
    # Only the target selection box is active at this step.
    selection_boxes = OrderedDict([
        ('targets', True),
        ('segments', False),
    ])
    buttons = {
        'continue': {
            'label': 'Continue to next step',
            'url': '/phylogenetic_trees/segmentselection',
            'color': 'success',
        },
    }
class SegmentSelection(AbsSegmentSelection):
    """Step 2 of 3 in the phylogenetic-tree workflow: pick segments."""
    step = 2
    number_of_steps = 3
    docs = 'sequences.html#phylogeneric-trees'
    # Both selection boxes are shown at this step.
    selection_boxes = OrderedDict([
        ('targets', True),
        ('segments', True),
    ])
    buttons = {
        'continue': {
            'label': 'Continue to next step',
            'url': '/phylogenetic_trees/treesettings',
            'color': 'success',
        },
    }
class TreeSettings(AbsMiscSelection):
    """Step 3 of 3 in the phylogenetic-tree workflow: tree options."""
    step = 3
    number_of_steps = 3
    title = 'SELECT TREE OPTIONS'
    description = 'Select options for tree generation in the middle column.\nOnce you have selected your' \
        + ' settings, click the green button.'
    # BUG FIX: ``docs`` was assigned twice in the original
    # ('sequences.html#phylogeneric-trees' then '/documentation/similarities');
    # the first assignment was a dead store, so only the effective value
    # is kept.  Behavior is unchanged.
    docs = '/documentation/similarities'
    selection_boxes = OrderedDict([
        ('targets', True),
        ('segments', True),
    ])
    buttons = {
        'continue': {
            'label': 'Draw tree',
            'url': '/phylogenetic_trees/render',
            'color': 'success',
        },
    }
    tree_settings = True
class Treeclass:
    """Builds a phylogenetic tree by driving the external phylip tools.

    Workflow (Prepare_file): build a sequence alignment, write a PHYLIP
    ``infile``, run seqboot (optional bootstrap) -> protdist ->
    neighbor -> consense (optional) in a temp directory, then convert
    the resulting newick tree to phyloxml via PrepareTree.
    """
    # NOTE(review): mutable class attribute; it is shadowed by the
    # instance attribute assigned in __init__, so it appears unused --
    # confirm nothing relies on the class-level dict.
    family = {}
    def __init__(self):
        # Per-ring annotation configuration, keyed by ring name.  Each
        # entry controls inclusion, drawing order, colouring scheme and
        # the proteins highlighted by that ring.
        self.Additional_info={"crystal": {"include":"False", "order":6, "colours":{"crystal_true":"#6dcde1","crystal_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":None, "child": None, "name":"Crystals"},
                              "class": {"include":"True", "order":0, "colours":{}, "proteins":[], "color_type":"grayscale", "parent":[], "child": ["family,ligand"], "name":"Class"},
                              "family": {"include":"True", "order":1, "colours":{}, "proteins":[], "color_type":"spectrum", "parent":[], "child": ["ligand"], "name":"Ligand type"},
                              "ligand": {"include":"True", "order":2, "colours":{}, "proteins":[], "color_type":"spectrum", "parent":["family","class"], "child": [], "name":"Receptor type"},
                              "mutant": {"include":"False", "order":3, "colours":{"mutant_true":"#6dcde1","mutant_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":[], "child": ["mutant_plus","mutant_minus"], "name":"Mutated proteins"},
                              "mutant_plus": {"include":"False", "order":4, "colours":{"mutant_plus_true":"#6dcde1","mutant_plus_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":"mutant", "child": [], "name":"Positive affinity mutants"},
                              "mutant_minus": {"include":"False", "order":5, "colours":{"mutant_minus_true":"#6dcde1","mutant_minus_false":"#EEE"}, "color_type":"single", "proteins":[], "parent":"mutant", "child": [], "name":"Negative affinity mutants"}
                              }
        # (order, name) pairs for the rings currently enabled, sorted by order.
        self.buttons = [(x[1]['order'],x[1]['name']) for x in sorted(self.Additional_info.items(), key= lambda x: x[1]['order']) if x[1]['include']=='True']
        self.family = {}
        self.phylip = None   # raw newick output of phylip, acc ids mapped back
        self.outtree = None  # textual phylip outfile
        self.dir = ''
    def Prepare_file(self, request,build=False):
        """Build the alignment, run the phylip pipeline, return tree data.

        When ``build`` is falsy, proteins/segments come from the user's
        session selection; otherwise (a family slug string) proteins are
        pulled from the database for pre-building static trees.

        Returns an 8-tuple (phylogeny_input, branches, ttype, total,
        legend, box, Additional_info, buttons); on failure every element
        is the sentinel string "too big" (phylip timeout) or the first
        element is 'More_prots' (fewer than 3 proteins).
        """
        self.Tree = PrepareTree(build)
        a=Alignment()
        sets = ProteinSet.objects.all()
        #### Get additional data ####
        crysts=[]
        for n in sets:
            if n.id==1:
                for prot in n.proteins.all():
                    crysts.append(prot.entry_name)
        #############################
        # get the user selection from session
        if build != False:
            ################################## FOR BUILDING STATISTICS ONLY##########################
            build_proteins=[]
            if build == '001':
                cons_prots = []
                for prot in Protein.objects.filter(sequence_type__slug='consensus', species_id=1):
                    if prot.family.slug.startswith('001') and len(prot.family.slug.split('_'))==3:
                        build_proteins.append(prot)
                for set in sets:
                    if set.id==1:
                        for prot in set.proteins.all():
                            if prot.family.slug.startswith('001_') and prot.species.latin_name=='Homo sapiens':
                                build_proteins.append(prot)
            else:
                for prot in Protein.objects.filter(sequence_type__slug='wt', species_id=1):
                    if prot.family.slug.startswith(build):
                        build_proteins.append(prot)
            a.load_proteins(build_proteins)
            segments = ProteinSegment.objects.all()
            a.load_segments(segments)
            self.bootstrap,self.UPGMA,self.branches,self.ttype=[0,1,1,0]
            ##################################################################
        else:
            simple_selection=request.session.get('selection', False)
            a.load_proteins_from_selection(simple_selection)
            a.load_segments_from_selection(simple_selection)
            self.bootstrap,self.UPGMA,self.branches,self.ttype = map(int,simple_selection.tree_settings)
            # Bootstrap setting is stored as an exponent: 1 -> 10, 2 -> 100...
            if self.bootstrap!=0:
                self.bootstrap=pow(10,self.bootstrap)
        #### Create an alignment object
        a.build_alignment()
        a.calculate_statistics()
        a.calculate_similarity()
        self.total = len(a.proteins)
        total_length = 0
        for chain in a.proteins[0].alignment:
            total_length += len(a.proteins[0].alignment[chain])
        families = ProteinFamily.objects.all()
        self.famdict = {}
        for n in families:
            self.famdict[self.Tree.trans_0_2_A(n.slug)]=n.name
        # Unique scratch directory for the phylip run.
        dirname = unique_filename = uuid.uuid4()
        os.mkdir('/tmp/%s' %dirname)
        infile = open('/tmp/%s/infile' %dirname,'w')
        # PHYLIP header: number of sequences and alignment length.
        infile.write(' '+str(self.total)+' '+str(total_length)+'\n')
        if len(a.proteins) < 3:
            return 'More_prots',None, None, None, None,None,None,None
        ####Get additional protein information
        accesions = {}
        for n in a.proteins:
            fam = self.Tree.trans_0_2_A(n.protein.family.slug)
            if n.protein.sequence_type.slug == 'consensus':
                fam+='_CON'
            entry_name = n.protein.entry_name
            name = n.protein.name.replace('<sub>','').replace('</sub>','').replace('<i>','').replace('</i>','')
            if '&' in name and ';' in name:
                name = name.replace('&','').replace(';',' ')
            acc = n.protein.accession
            if acc:
                acc = acc.replace('-','_')
            else:
                # NOTE(review): ``link`` is not defined anywhere in this
                # method -- this branch raises NameError for proteins
                # without an accession (entry_name may have been meant).
                acc = link.replace('-','_')[:6]
            spec = str(n.protein.species)
            fam += '_'+n.protein.species.common_name.replace(' ','_').upper()
            desc = name
            if entry_name in crysts:
                if not fam in self.Additional_info['crystal']['proteins']:
                    self.Additional_info['crystal']['proteins'].append(fam)
            if len(name)>25:
                name=name[:25]+'...'
            self.family[entry_name] = {'name':name,'family':fam,'description':desc,'species':spec,'class':'','accession':acc,'ligand':'','type':'','link': entry_name}
            accesions[acc]=entry_name
            ####Write PHYLIP input
            sequence = ''
            for chain in n.alignment:
                for residue in n.alignment[chain]:
                    sequence += residue[2].replace('_','-')
            infile.write(acc+' '*9+sequence+'\n')
        infile.close()
        ####Run bootstrap
        if self.bootstrap:
            ### Write phylip input options
            inp = open('/tmp/%s/temp' %dirname,'w')
            inp.write('\n'.join(['r',str(self.bootstrap),'y','77','y'])+'\n')
            inp.close()
            ###
            try:
                subprocess.check_output(['phylip seqboot<temp'], shell=True, cwd = '/tmp/%s' %dirname, timeout=60)
                os.rename('/tmp/%s/outfile' %dirname, '/tmp/%s/infile' %dirname)
            except:
                kill_phylo() #FIXME, needs better way of handling this!
                return "too big","too big","too big","too big","too big","too big","too big","too big"
        ### Write phylip input options
        inp = open('/tmp/%s/temp' %dirname,'w')
        if self.bootstrap:
            inp.write('\n'.join(['m','d',str(self.bootstrap),'y'])+'\n')
        else:
            inp.write('y\n')
        inp.close()
        ###
        try:
            subprocess.check_output(['phylip protdist<temp>>log'], shell=True, cwd = '/tmp/%s' %dirname, timeout=60)
        except:
            kill_phylo() #FIXME, needs better way of handling this!
            return "too big","too big","too big","too big","too big","too big","too big","too big"
        os.rename('/tmp/%s/infile' %dirname, '/tmp/%s/dupa' %dirname)
        os.rename('/tmp/%s/outfile' %dirname, '/tmp/%s/infile' %dirname)
        inp = open('/tmp/%s/temp' %dirname,'w')
        if self.bootstrap:
            ### Write phylip input options
            if self.UPGMA:
                inp.write('\n'.join(['N','m',str(self.bootstrap),'111','y'])+'\n')
            else:
                inp.write('\n'.join(['m',str(self.bootstrap),'111','y'])+'\n')
        else:
            if self.UPGMA:
                inp.write('N\ny\n')
            else:
                inp.write('y\n')
        inp.close()
        ###
        try:
            subprocess.check_output(['phylip neighbor<temp'], shell=True, cwd = '/tmp/%s' %dirname, timeout=60)
        except:
            kill_phylo() #FIXME, needs better way of handling this!
            return "too big","too big","too big","too big","too big","too big","too big","too big"
        if self.bootstrap:
            os.rename('/tmp/%s/outfile' %dirname, '/tmp/%s/infile' %dirname)
            os.rename('/tmp/%s/outtree' %dirname, '/tmp/%s/intree' %dirname)
            ### Write phylip input options
            inp = open('/tmp/%s/temp' %dirname,'w')
            inp.write('y\n')
            inp.close()
            ###
            try:
                subprocess.check_output(['phylip consense<temp'], shell=True, cwd = '/tmp/%s' %dirname, timeout=60)
            except:
                kill_phylo() #FIXME, needs better way of handling this!
                return "too big","too big","too big","too big","too big","too big","too big","too big"
        self.phylip = open('/tmp/%s/outtree' %dirname).read()
        # Map phylip accession ids back to protein entry names.
        for acc in accesions.keys():
            self.phylip=self.phylip.replace(acc,accesions[acc])
        # self.phylogeny_output = self.phylip
        self.outtree = open('/tmp/%s/outfile' %dirname).read().lstrip()
        phylogeny_input = self.get_phylogeny('/tmp/%s/' %dirname)
        shutil.rmtree('/tmp/%s' %dirname)
        if build != False:
            # Pre-build mode: write static artifacts instead of returning.
            open('static/home/images/'+build+'_legend.svg','w').write(str(self.Tree.legend))
            open('static/home/images/'+build+'_tree.xml','w').write(phylogeny_input)
        else:
            return phylogeny_input, self.branches, self.ttype, self.total, str(self.Tree.legend), self.Tree.box, self.Additional_info, self.buttons
    def get_phylogeny(self, dirname):
        """Convert the stored newick tree to phyloxml and return it as one line."""
        self.Tree.treeDo(dirname, self.phylip,self.branches,self.family,self.Additional_info, self.famdict)
        phylogeny_input = open('%s/out.xml' %dirname,'r').read().replace('\n','')
        return phylogeny_input
    def get_data(self):
        """Return the tree rendering parameters collected by Prepare_file."""
        return self.branches, self.ttype, self.total, str(self.Tree.legend), self.Tree.box, self.Additional_info, self.buttons
def get_buttons(request):
    """Render the ring-toggle buttons for the tree stored in the session."""
    tree = request.session['Tree']
    rings = sorted(tree.Additional_info.items(),
                   key=lambda item: item[1]['order'])
    buttons = [(info['order'], info['name'])
               for _, info in rings if info['include'] == 'True']
    return render(request, 'phylogenetic_trees/ring_buttons.html', {'but': buttons})
def modify_tree(request):
    """Toggle annotation rings on the session tree and re-render it.

    Reads the ``arg[]``/``value[]`` GET lists, flips the corresponding
    'include' flags on the session's Treeclass, regenerates the phyloxml
    in a scratch directory and renders main.html.
    """
    # Best-effort removal of a stale working directory.
    try:
        shutil.rmtree('/tmp/modify')
    except OSError:
        pass
    arg = request.GET.getlist('arg[]')
    value = request.GET.getlist('value[]')
    Tree_class = request.session['Tree']
    for n in range(len(arg)):
        Tree_class.Additional_info[arg[n].replace('_btn', '')]['include'] = value[n]
    request.session['Tree'] = Tree_class
    os.mkdir('/tmp/modify')
    phylogeny_input = Tree_class.get_phylogeny('/tmp/modify')
    branches, ttype, total, legend, box, Additional_info, buttons = Tree_class.get_data()
    shutil.rmtree('/tmp/modify')
    # BUG FIX: the original discarded the result of the ttype == '1'
    # branch, leaving ``count`` undefined (NameError when rendering).
    # Assign the computed canvas size on both paths; the stray debug
    # print(count) is also removed.
    if ttype == '1':
        count = float(total) / 4 * 100
    else:
        count = 1900 - 1400 / math.sqrt(float(total))
    return render(request, 'phylogenetic_trees/main.html', {'phylo': phylogeny_input, 'branch':branches, 'ttype': ttype, 'count':count, 'leg':legend, 'b':box, 'add':Additional_info, 'but':buttons, 'phylip':Tree_class.phylip, 'outtree':Tree_class.outtree})
def render_tree(request):
    """Build a tree from the user's selection and render the alignment page.

    Delegates to Treeclass.Prepare_file(); short-circuits to error pages
    when the phylip run timed out ('too big') or fewer than three
    proteins were selected ('More_prots').
    """
    Tree_class = Treeclass()
    phylogeny_input, branches, ttype, total, legend, box, Additional_info, buttons = Tree_class.Prepare_file(request)
    if phylogeny_input == 'too big':
        return render(request, 'phylogenetic_trees/too_big.html')
    if phylogeny_input == 'More_prots':
        return render(request, 'phylogenetic_trees/warning.html')
    # BUG FIX: the original discarded the result of the ttype == '1'
    # expression, leaving ``count`` unbound on that path (NameError).
    if ttype == '1':
        count = float(total) / 4 * 100
    else:
        count = 1900 - 1400 / math.sqrt(float(total))
    request.session['Tree'] = Tree_class
    return render(request, 'phylogenetic_trees/alignment.html', {'phylo': phylogeny_input, 'branch':branches, 'ttype': ttype, 'count':count, 'leg':legend, 'b':box, 'add':Additional_info, 'but':buttons, 'phylip':Tree_class.phylip, 'outtree':Tree_class.outtree })
| {
"content_hash": "95d2beeb81fe8818c97cdce1b515e27c",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 261,
"avg_line_length": 46.3394495412844,
"alnum_prop": 0.5484722497195275,
"repo_name": "fosfataza/protwis",
"id": "9f7312e3558d4905b1d48f077855d8a6b6d130c5",
"size": "15155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phylogenetic_trees/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "104739"
},
{
"name": "HTML",
"bytes": "1426027"
},
{
"name": "JavaScript",
"bytes": "1127392"
},
{
"name": "Python",
"bytes": "2593740"
},
{
"name": "Shell",
"bytes": "386"
}
],
"symlink_target": ""
} |
"""
Requires python-chess
"""
import traceback
import sys
import os
import random
from threading import Thread
from _thread import *
from qboard import QBoard
from client import Client
from server import Server
import chess
import chess.engine
import chess.pgn
import chess.polyglot
import chess.svg
#import chess.uci
from PyQt5.QtCore import Qt, QTime, QTimer, QRectF, QSize
from PyQt5.QtGui import QPixmap, QPainter, QImage, QIcon
from PyQt5.QtWidgets import QApplication, QWidget, QAction, QMainWindow, QVBoxLayout, QHBoxLayout
from PyQt5.QtWidgets import QTabWidget, QFileDialog, QListWidget, QListWidgetItem, QLabel, QInputDialog, QLineEdit
class TabEmpty(QWidget):
    """Base tab widget: a vertical layout with a caption label on top."""

    def __init__(self, parent, caption):
        super().__init__(parent)
        self.parent = parent
        self.layout = QVBoxLayout()
        self.setLayout(self.layout)
        # Idiom fix: identity comparison for None (was ``caption==None``).
        if caption is None:
            caption = 'Empty Tab'
        self.label_caption = QLabel(caption)
        self.layout.addWidget(self.label_caption)

    def closing(self):
        """Cleanup hook called when the tab closes; base class does nothing."""
        pass
class TabGame(TabEmpty):
    """Tab that embeds a chess board widget below the caption."""

    def __init__(self, parent, caption):
        super().__init__(parent, caption)
        self.can_move = True
        board = QBoard(self)
        self.boardWidget = board
        # Stretch factor 1 lets the board take the remaining space.
        self.layout.addWidget(board, 1)

    def get_last_move(self):
        """Return the most recent move; the plain game tab tracks none."""
        return None
class TabServer(TabEmpty):
    """Tab listing connected users for a network game."""

    def __init__(self, parent, caption):
        super().__init__(parent, caption)
        self.list = QListWidget()
        self.layout.addWidget(self.list)
        self.client = None
        self.server = None

    def addUser(self, username):
        """Append a user name to the list widget."""
        entry = QListWidgetItem(username)
        self.list.addItem(entry)

    def clearUsers(self):
        """Remove every entry from the user list."""
        self.list.clear()

    def closing(self):
        """Stop and release the client/server connections, if any."""
        if self.client is not None:
            self.client.stop()
            self.client = None
        if self.server is not None:
            self.server.stop()
            self.server = None
class CoordLearn(TabGame):
    """Board-coordinate training game: click the prompted rank/file/square.

    gametype: 0 = find rank, 1 = find file, 2 = find square.
    color: 0 = white orientation, 1 = black (flipped), 2 = random flip
    after each answer.
    """

    def __init__(self, parent, caption, gametype, color):
        super().__init__(parent, caption)
        self.gametype = gametype
        self.color = color
        self.boardWidget.addMousePressListener(self)
        self.boardWidget.setBoard(chess.Board())
        self.boardWidget.flipped = color==1
        self.parent.add_message('**** Click anywhere on the board to start.')
        self.timer = QTime()
        self.timerStarted = False

    def mousePressed(self, square):
        """Handle a board click: start the game or score the answer."""
        if not self.timerStarted:
            # First click: start timing, reset score, pick first target.
            self.timer.start()
            self.timerStarted = True
            self.score = 0
            self.rand8 = random.randrange(8)
            self.rand64 = random.randrange(64)
            if self.color == 2:
                self.boardWidget.flipped = random.randrange(2)
        else:
            if self.gametype == 0:
                # square >> 3 is the rank index of the clicked square.
                if self.rand8 == square >> 3:
                    self.score += 1
                    self.rand8 = random.randrange(8)
                    if self.color == 2:
                        self.boardWidget.flipped = random.randrange(2)
                else:
                    # Wrong answer: show what was actually clicked.
                    self.parent.add_message('X: ' + chess.RANK_NAMES[square >> 3])
            elif self.gametype == 1:
                # square & 7 is the file index of the clicked square.
                if self.rand8 == square & 7:
                    self.score += 1
                    self.rand8 = random.randrange(8)
                    if self.color == 2:
                        self.boardWidget.flipped = random.randrange(2)
                else:
                    self.parent.add_message('X: ' + chess.FILE_NAMES[square & 7])
            elif self.gametype == 2:
                if chess.SQUARES[self.rand64] == square:
                    self.score += 1
                    self.rand64 = random.randrange(64)
                    if self.color == 2:
                        self.boardWidget.flipped = random.randrange(2)
                else:
                    self.parent.add_message('X: ' + chess.SQUARE_NAMES[square])
        # Announce the running score and the next target.
        self.parent.add_message('Score: ' + str(self.score))
        if self.gametype == 0: # RANK
            self.parent.add_message('**** Find Rank: ' + chess.RANK_NAMES[self.rand8])
            self.boardWidget.setText(chess.RANK_NAMES[self.rand8])
        elif self.gametype == 1: # File
            self.parent.add_message('**** Find File: ' + chess.FILE_NAMES[self.rand8])
            self.boardWidget.setText(chess.FILE_NAMES[self.rand8])
        elif self.gametype == 2: # Square
            self.parent.add_message('**** Find Square: ' + chess.SQUARE_NAMES[self.rand64])
            self.boardWidget.setText(chess.SQUARE_NAMES[self.rand64])

    def elapsed(self):
        """Milliseconds since the first click started the timer."""
        return self.timer.elapsed()
class QGame(TabGame):
    """Tab hosting a playable chess board.

    Two modes, tracked by ``board_type``:
      1 -- free play (every legal move is simply made on the board).
      2 -- "shadow" an existing PGN game: the user plays the winner's side
           and each deviation from the recorded move is scored by the engine
           on a background thread.
    """

    # widget type
    # SHADOW = range(2)
    mouseMovePos = None
    offset_x = offset_y = 0
    winner = True       # side the user plays in shadow mode (True = white)
    thread = None       # background engine-evaluation thread, if any
    total_score = 0     # cumulative engine-score delta vs. the game's moves

    def __init__(self, parent, chess_game=None, caption=None):
        super().__init__(parent, caption)
        self.boardWidget.addMoveListener(self)
        # BUG FIX: compare to None with 'is', not '=='; '==' can invoke
        # arbitrary __eq__ implementations (PEP 8 recommendation).
        if chess_game is None:
            chess_game = chess.pgn.Game()
            self.board_type = 1
            self.boardWidget.board_type = 1
        else:
            self.board_type = 2
            self.boardWidget.board_type = 2
        self.node = chess_game          # current node in the PGN game tree
        self.board = chess_game.board()
        self.last_move = None
        result = chess_game.headers['Result']
        # When black won the recorded game, the user shadows black and the
        # board is displayed flipped.
        self.flip_board = False
        if result == '0-1':
            self.winner = False
            self.flip_board = True
        self.boardWidget.setBoard(self.board, self.flip_board)
        self.can_move = self.board.turn == self.winner
        if not self.can_move:
            # The opponent (recorded game side) moves first; play it now.
            game_move = self.get_next_game_move()
            if game_move:
                self.parent.add_message('Opponent move: ' + self.board.san(game_move))
                self.make_move(game_move)
                parent.game_state_changed(self)
        self.parent.add_message('**** Make move for '+('white' if self.board.turn else 'black'))
        self.timer = QTime()
        self.timer.start()
        # moves = game.main_line()
        # print(self.board.variation_san(moves))

    def elapsed(self):
        """Return milliseconds since this tab's timer was (re)started."""
        return self.timer.elapsed()

    # NOTE(review): a large commented-out paintEvent() implementation that
    # lived here as a dead triple-quoted string has been removed.

    def get_next_game_move(self):
        """Advance to the next PGN node and return its move (None at end)."""
        if len(self.node.variations) < 1:
            return None
        next_node = self.node.variations[0]
        self.node = next_node
        return next_node.move

    def get_last_move(self):
        """Return the most recently played move, or None before any move."""
        lm = None
        try:
            lm = self.board.peek()
        # BUG FIX: Board.peek() raises IndexError on an empty move stack;
        # the previous bare 'except:' also swallowed unrelated errors
        # (including KeyboardInterrupt).
        except IndexError:
            pass
        return lm

    def compare_user_move_with_game(self, move):
        """Score the user's move against the move played in the PGN game."""
        game_move = self.get_next_game_move()
        if game_move is None:
            return
        move_text = self.board.san(move)
        self.parent.add_message('Your move: '+move_text+', Game move: '+self.board.san(game_move))
        is_book_move = self.parent.is_book_move(self.board, move)
        if is_book_move:
            opening_name = self.parent.get_opening_name(self.board)
            self.parent.add_message(move_text+' (Book move '+opening_name+')')
        if move != game_move and not is_book_move:
            # Engine evaluation is slow; run it off the UI thread on a copy
            # of the board so the live board can keep changing meanwhile.
            board_copy = self.board.copy()
            self.thread = Thread(target=self.compare_moves, args=(board_copy, move, game_move))
            self.thread.start()
        self.make_move(game_move)

    def make_move(self, move):
        """Push *move* onto the board and recompute whether the user may move."""
        self.last_move = move
        self.board.push(move)
        # In shadow mode the user only moves on the winner's turn.
        self.can_move = self.board.turn == self.winner if self.board_type == 2 else True
        self.parent.game_state_changed(self)

    def compare_moves(self, board, user_move, game_move):
        """(worker thread) Report the engine score delta: user vs. game move."""
        evaluation = self.parent.evaluate_moves(board, [user_move, game_move])
        score_diff = evaluation[user_move] - evaluation[game_move]
        self.parent.add_message('Move score ('+board.san(user_move)+' vs '+board.san(game_move)+'): '+ str(score_diff))
        self.total_score += score_diff
        self.parent.add_message('Game score: '+ str(self.total_score))

    def evaluate(self, board, move):
        """Report the engine's evaluation of *board* tagged with *move*."""
        evaluation = self.parent.evaluate_board(board)[0]
        self.parent.add_message('Position Evaluation ('+move+') '+str(evaluation))

    # process user move
    def userMoved(self, uci_move):
        """Handle a move made on the board widget (given as a UCI string)."""
        move = chess.Move.from_uci(uci_move)
        # is it a legal move? Illegal moves are silently ignored.
        if move not in self.board.legal_moves:
            return
        if self.board_type == 2:
            self.compare_user_move_with_game(move)
            # make opponents move
            if not self.can_move:
                # make the next game move as well
                game_move = self.get_next_game_move()
                if game_move is None:
                    return
                self.parent.add_message('Opponent move: ' + self.board.san(game_move))
                self.make_move(game_move)
                self.parent.add_message('**** Make move for '+('white' if self.board.turn else 'black'))
                self.timer.restart()
        else:
            self.make_move(move)
class GameListItem(QListWidgetItem):
    """List entry for one PGN game, remembering its offset in the file."""

    def __init__(self, pgn_offset, pgn_header, index=''):
        welo = pgn_header['WhiteElo']
        belo = pgn_header['BlackElo']
        prefix = '' if index == '' else str(index) + '. '
        label = (prefix + '[' + pgn_header['Result'] + '] '
                 + pgn_header['White'] + ' ' + welo
                 + ' vs ' + pgn_header['Black'] + ' ' + belo)
        super().__init__(label)
        self.offset = pgn_offset    # byte offset of the game in the PGN file
        self.header = pgn_header
        self.index = index
class OpeningListItem(QListWidgetItem):
    """List entry for a named opening line keyed by its SAN move sequence."""

    def __init__(self, key, value, index=''):
        prefix = '' if index == '' else str(index) + '. '
        super().__init__(prefix + value[0] + ' (' + key + ')')
        self.key = key        # SAN move sequence identifying the line
        self.value = value    # (display name, mainline moves)
class TacticsListItem(QListWidgetItem):
    """List entry for one tactics position, remembering its PGN offset."""

    def __init__(self, pgn_offset, pgn_header, index=''):
        prefix = '' if index == '' else str(index) + '. '
        label = (prefix + '[' + pgn_header['Result'] + '] '
                 + pgn_header['White'] + ' vs ' + pgn_header['Black'])
        super().__init__(label)
        self.offset = pgn_offset    # byte offset of the game in the PGN file
        self.header = pgn_header
        self.index = index
class CoordListItem(QListWidgetItem):
    """List entry describing one coordinate-training exercise."""

    # Caption, File/Rank/Position (0/1/2), White/Black/Both (0/1/2)
    def __init__(self, caption, gametype, color):
        super().__init__(caption)
        self.gametype = gametype    # 0 = rank, 1 = file, 2 = square
        self.color = color          # 0 = white, 1 = black, 2 = both
# app dimension in pixels
DEFAULT_WIDTH = 1200    # initial main-window width
DEFAULT_HEIGHT = 800    # initial main-window height
class App(QMainWindow):
    """Main window: game/opening/tactics/coordinate lists plus engine tools."""

    def __init__(self):
        super().__init__()
        self.openings = {}          # SAN line -> (display name, mainline moves)
        self.init_ui()
        self.static_board = chess.Board()   # scratch board for SAN rendering
        self.add_message('initializing opening book...')
        self.book = chess.polyglot.open_reader("book.bin")
        # ECO openings parse in the background; the list fills in later.
        self.thread = Thread(target=self.init_openings)
        self.thread.start()
        self.add_message('initializing engine...')
        self.engine = chess.engine.SimpleEngine.popen_uci("stockfish")
        self.engine_busy = False    # crude mutual exclusion for engine calls
        self.add_message('Ready')
        self.timer = QTimer()
        self.timer.timeout.connect(self.tick)
        self.timer.start(200)       # status-bar clock refresh, every 200 ms

    def init_openings(self):
        """(worker thread) Load ECO openings from ecoe.pgn into self.openings."""
        opening_file = open("ecoe.pgn")
        game = chess.pgn.read_game(opening_file)
        while game is not None:
            chess_board = game.board()
            line = chess_board.variation_san(game.mainline_moves())
            black_player_name = game.headers['Black']
            # The Black header carries an optional sub-name; '?' means none.
            name = (' (' + black_player_name + ')') if black_player_name != '?' else ''
            self.openings[line] = (game.headers['White'] + name, game.mainline_moves())
            game = chess.pgn.read_game(opening_file)
        self.populate_opening_list()

    def get_opening_name(self, board):
        """Return '- <name>' if *board*'s move sequence is a known opening, else ''."""
        san = self.static_board.variation_san(board.move_stack)
        if san in self.openings:
            return '- '+self.openings[san][0]
        return ''

    def is_book_move(self, board, move):
        """Return True if *move* appears in the polyglot book for *board*."""
        return any(move == x.move for x in self.book.find_all(board))

    def init_ui(self):
        """Build menus, tabs, lists and the central layout, then show."""
        self.statusBar()
        self.statusBar().showMessage('Ready')
        mm = self.menuBar()
        fm = mm.addMenu('&File')
        act = QAction("New", self)
        act.triggered.connect(self.new_game)
        fm.addAction(act)
        act = QAction("Shadow..", self)
        act.triggered.connect(self.shadow)
        fm.addAction(act)
        act = QAction("Analyze", self)
        act.triggered.connect(self.analyze)
        fm.addAction(act)
        mnuSever = mm.addMenu('&Server')
        act = QAction("&Create Server...", self)
        act.triggered.connect(self.createServer)
        mnuSever.addAction(act)
        act = QAction("&Join Server...", self)
        act.triggered.connect(self.joinServer)
        mnuSever.addAction(act)
        self.games_list = QListWidget()
        self.games_list.itemDoubleClicked.connect(self.on_list_dbl_click)
        self.opening_list = QListWidget()
        self.opening_list.itemDoubleClicked.connect(self.on_opening_list_dbl_click)
        self.tactics_list = QListWidget()
        self.tactics_list.itemDoubleClicked.connect(self.on_tactics_list_dbl_click)
        self.coord_learn = QListWidget()
        self.coord_learn.itemDoubleClicked.connect(self.on_coord_learn_dbl_click)
        # tabs
        self.tabs = QTabWidget()
        self.tabs.setTabsClosable(True)
        self.tabs.currentChanged.connect(self.tab_changed)
        self.tabs.tabCloseRequested.connect(self.close_tab)
        self.tabs.addTab(self.games_list, "Games")
        self.populate_game_list_from_pgn('games.pgn')
        self.tabs.addTab(self.opening_list, "Openings")
        self.tabs.addTab(self.tactics_list, "Tactics")
        self.populate_tactics_list_from_pgn('tactics.pgn')
        self.tabs.addTab(self.coord_learn, "Coordinates")
        self.populate_coord_learn_list()
        main_widget = QWidget()
        main_layout = QHBoxLayout(main_widget)
        game_msg_widget = QWidget()
        game_msg_layout = QVBoxLayout(game_msg_widget)
        game_msg_layout.addWidget(self.tabs, 2)
        self.msg_list = QListWidget()
        game_msg_layout.addWidget(self.msg_list)
        main_layout.addWidget(game_msg_widget, 2)
        moves_check_widget = QWidget()
        moves_check_layout = QVBoxLayout(moves_check_widget)
        self.moves_list = QLabel()
        self.moves_list.setWordWrap(True)
        moves_check_layout.addWidget(self.moves_list)
        self.check_list = QListWidget()
        self.populate_check_list()
        moves_check_layout.addWidget(self.check_list, 2)
        main_layout.addWidget(moves_check_widget)
        self.setCentralWidget(main_widget)
        self.resize(DEFAULT_WIDTH, DEFAULT_HEIGHT)
        self.setWindowTitle('Chess Coach')
        self.show()

    def tab_changed(self, index):
        """Refresh the move panel when the newly active tab is a game."""
        tab = self.tabs.currentWidget()
        if isinstance(tab, QGame):
            self.game_state_changed(tab)

    def close_tab(self, index):
        """Close a tab, letting it release its resources first."""
        tab = self.tabs.currentWidget()
        if isinstance(tab, TabEmpty):
            tab.closing()
        self.tabs.removeTab(index)

    def game_state_changed(self, qgame):
        """Render *qgame*'s move stack as SAN in the side panel."""
        msg = self.static_board.variation_san(qgame.board.move_stack)
        self.moves_list.setText(msg)

    def analyze(self):
        """Start a background engine analysis of the current game tab."""
        tab = self.tabs.currentWidget()
        if isinstance(tab, QGame):
            # Analyse a copy so the live board can keep changing.
            board_copy = tab.board.copy()
            self.thread = Thread(target=self.analyze_board, args=(board_copy,))
            self.thread.start()

    def analyze_board(self, board):
        """(worker thread) Evaluate *board*; post score + principal variation."""
        self.eval_msg = QListWidgetItem('Analyzing Position...')
        self.add_message(self.eval_msg)
        msg = self.evaluate_board(board)
        self.eval_msg = QListWidgetItem('... ('+str(msg[0])+') '+board.variation_san(msg[1]))
        self.add_message(self.eval_msg)

    def tick(self):
        """Update the status-bar clock from the active tab's timer."""
        tab = self.tabs.currentWidget()
        try:
            elapsed = tab.elapsed() / 1000
            seconds = int(elapsed)
            minutes = seconds/60.0
            seconds = seconds % 60
            hours = minutes/60.0
            minutes = minutes%60
            msg = "%02d:%02d:%02d" % (hours, minutes, seconds)
            self.statusBar().showMessage(msg)
        except Exception:
            # Best effort: tabs without an elapsed() method are skipped.
            pass

    def populate_check_list(self):
        """Load the pre-move checklist items from check_list.txt."""
        file = open('check_list.txt')
        for line in file:
            self.check_list.addItem(line.rstrip())

    def piece_location(self, piece):
        """Map a piece widget's pixel position to (file, rank) names.

        NOTE(review): relies on self.mx/my/cx/cy which are not set anywhere
        visible in this class -- confirm they are provided by a subclass.
        """
        x = int((piece.pos().x() - self.mx) / self.cx)
        y = int((piece.pos().y() - self.my) / self.cy)
        return chess.FILE_NAMES[x], chess.RANK_NAMES[7 - y]

    def new_game(self):
        """Open a fresh free-play game tab."""
        try:
            self.tabs.addTab(QGame(self), 'Game')
            self.tabs.setCurrentIndex(self.tabs.count()-1)
            self.add_message('New Game')
        except Exception as ex:
            print(ex)

    def shadow(self):
        """Pick a PGN file and load its games for shadow play."""
        try:
            fileName, _ = QFileDialog.getOpenFileName(self, 'Get File', None, 'PGN (*.pgn)')
            if fileName:
                self.populate_game_list_from_pgn(fileName)
        except Exception as ex:
            print(ex)

    def populate_opening_list(self):
        """Fill the openings tab, grouped by the first moves of each line."""
        self.opening_list.clear()
        grouping = ''
        index = 1
        for k, v in self.openings.items():
            # Start a new visual group whenever the opening prefix changes.
            c = k[:6].strip()
            if grouping != c:
                grouping = c
                self.opening_list.addItem('==== '+grouping+' ====')
            opening = OpeningListItem(k, v, index)
            self.opening_list.addItem(opening)
            index += 1

    def populate_game_list_from_pgn(self, file_name):
        """List all games in *file_name*, remembering each header's offset."""
        self.pgn_file = open(file_name)
        self.games_list.clear()
        index = 1
        while True:
            offset = self.pgn_file.tell()
            header = chess.pgn.read_headers(self.pgn_file)
            if header is None:
                break
            game = GameListItem(offset, header, index)
            self.games_list.addItem(game)
            index += 1
        self.update()

    def populate_tactics_list_from_pgn(self, file_name):
        """List all tactics positions in *file_name* with their offsets."""
        self.tactics_file = open(file_name)
        self.tactics_list.clear()
        index = 1
        while True:
            offset = self.tactics_file.tell()
            header = chess.pgn.read_headers(self.tactics_file)
            if header is None:
                break
            game = TacticsListItem(offset, header, index)
            self.tactics_list.addItem(game)
            index += 1
        self.update()

    def populate_coord_learn_list(self):
        """Offer the fixed set of coordinate-training exercises."""
        self.coord_learn.addItem(CoordListItem('Rank (white)', 0, 0))
        self.coord_learn.addItem(CoordListItem('File (white)', 1, 0))
        self.coord_learn.addItem(CoordListItem('Position (white)', 2, 0))
        self.coord_learn.addItem(CoordListItem('Rank (black)', 0, 1))
        self.coord_learn.addItem(CoordListItem('File (black)', 1, 1))
        self.coord_learn.addItem(CoordListItem('Position (black)', 2, 1))
        self.coord_learn.addItem(CoordListItem('Random (both)', 2, 2))
        self.update()

    # Game list Double Clicked
    def on_list_dbl_click(self, selected_item):
        """Open the double-clicked game in a new shadow tab."""
        self.pgn_file.seek(selected_item.offset)
        selected_game = chess.pgn.read_game(self.pgn_file)
        self.static_board = selected_game.board()
        tab_caption = selected_item.text()[:7]+'...'
        self.tabs.addTab(QGame(self, selected_game, selected_item.text()), tab_caption)
        # open the latest tab
        self.tabs.setCurrentIndex(self.tabs.count()-1)
        self.add_message('Shadowing game: '+selected_item.text())

    # Tactics list Double Clicked
    def on_tactics_list_dbl_click(self, selected_item):
        """Open the double-clicked tactics position in a new tab."""
        self.tactics_file.seek(selected_item.offset)
        selected_game = chess.pgn.read_game(self.tactics_file)
        self.static_board = selected_game.board()
        tab_caption = selected_item.text()[:7]+'...'
        self.tabs.addTab(QGame(self, selected_game, selected_item.text()), tab_caption)
        # open the latest tab
        self.tabs.setCurrentIndex(self.tabs.count()-1)
        self.add_message('Tactics: '+selected_item.text())

    # Opening list Double Clicked
    def on_opening_list_dbl_click(self, selected_item):
        """Open the double-clicked opening line in a new tab."""
        selected_game = chess.pgn.Game()
        self.static_board = selected_game.board()
        selected_game.add_line(selected_item.value[1])
        tab_caption = selected_item.text()[:7]+'...'
        self.tabs.addTab(QGame(self, selected_game, selected_item.text()), tab_caption)
        self.tabs.setCurrentIndex(self.tabs.count()-1)
        # BUG FIX: this confirmation message was previously added twice.
        self.add_message('Opening: '+selected_item.text())

    # Coordinates list Double Clicked
    def on_coord_learn_dbl_click(self, selected_item):
        """Start the double-clicked coordinate-training exercise."""
        self.static_board = None
        tab_caption = selected_item.text()
        self.tabs.addTab(CoordLearn(self, selected_item.text(), selected_item.gametype, selected_item.color), tab_caption)
        self.tabs.setCurrentIndex(self.tabs.count()-1)
        self.add_message('Learn: '+selected_item.text())

    def add_message(self, msg):
        """Prepend *msg* (a string or QListWidgetItem) to the message panel."""
        self.msg_list.insertItem(0, msg)

    def evaluate_board(self, board, time=1):
        """Analyse *board* for *time* seconds; return (score, principal variation).

        NOTE(review): the parameter name shadows the stdlib ``time`` module
        inside this method; kept unchanged for caller compatibility.
        """
        # NOTE(review): busy-wait as crude mutual exclusion; a threading.Lock
        # would be the conventional fix but changes blocking behavior.
        while self.engine_busy:
            pass
        self.engine_busy = True
        info = self.engine.analyse(board, chess.engine.Limit(time=time))
        self.engine_busy = False
        return info['score'].relative.score(mate_score=100000), info['pv']

    def evaluate_moves(self, board, moves_list):
        """Return {move: engine score} for each move in *moves_list* on *board*."""
        while self.engine_busy:
            pass
        moves_score = {}
        self.engine_busy = True
        info = self.engine.analyse(board, chess.engine.Limit(time=1), multipv=len(moves_list), root_moves=moves_list)
        for i in range(len(info)):
            moves_score[info[i]['pv'][0]] = info[i]['score'].relative.score(mate_score=100000)
        self.engine_busy = False
        return moves_score

    def closeEvent(self, e):
        """Shut down the opening book and engine on window close."""
        print('... quitting!')
        self.book.close()
        self.engine.quit()

    def createServer(self):
        """Create a LAN server, then join it under a generated user name."""
        ip_port, do = QInputDialog.getText(self, 'Create Server', 'IP:Port', QLineEdit.Normal, 'localhost:5555')
        if do:
            ip, port = ip_port.split(':')
            print('Creating server', ip_port)
            server = Server(ip, int(port))
            res = server.connect()
            print(res[1])
            if res[0]:
                start_new_thread(self.threaded_server, (server,))
                defuser = 'User'+str(random.randint(1000, 9999))
                username, do = QInputDialog.getText(self, 'Join Server as', 'Username', QLineEdit.Normal, defuser)
                if do:
                    print('Connecting as', username)
                    tab_caption = 'Server @'+ip_port
                    tab = TabServer(self, tab_caption)
                    tab.server = server
                    tab.client = Client(ip, int(port), username, tab)
                    self.tabs.addTab(tab, tab_caption)
                    self.tabs.setCurrentIndex(self.tabs.count()-1)

    def threaded_server(self, server):
        """(worker thread) Run the server's accept loop."""
        server.listen()

    def joinServer(self):
        """Join an existing LAN server as a new client tab."""
        ip_port, do = QInputDialog.getText(self, 'Join Server', 'IP:Port', QLineEdit.Normal, 'localhost:5555')
        if do:
            ip, port = ip_port.split(':')
            defuser = 'User'+str(random.randint(1000, 9999))
            username, do = QInputDialog.getText(self, 'Join Server as', 'Username', QLineEdit.Normal, defuser)
            if do:
                print('Connecting as', username)
                tab_caption = 'Joined @'+ip_port
                tab = TabServer(self, tab_caption)
                tab.client = Client(ip, int(port), username, tab)
                if tab.client.connected:
                    self.tabs.addTab(tab, tab_caption)
                    self.tabs.setCurrentIndex(self.tabs.count()-1)
                else:
                    print('Connection Failed!')
if __name__ == '__main__':
    # Launch the Qt application and block until the main window closes.
    app = QApplication(sys.argv)
    window = App()
    sys.exit(app.exec_())
| {
"content_hash": "963d0ed6b74552979d5f48f6f9bb956b",
"timestamp": "",
"source": "github",
"line_count": 725,
"max_line_length": 181,
"avg_line_length": 35.98758620689655,
"alnum_prop": 0.5682419225020122,
"repo_name": "aole/Chess-Coach",
"id": "915017f48972f6ed8abe80be3232c57e6e02d45c",
"size": "26091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17535"
}
],
"symlink_target": ""
} |
from codecs import open
from setuptools import find_packages, setup
__version__ = "0.1.0"
__author__ = "SkyLothar"
__email__ = "allothar@gmail.com"
__url__ = "http://github.com"
with open("README.rst", "r", "utf-8") as f:
readme = f.read()
with open("requirements.txt", "r", "utf-8") as f:
install_requirements = f.read()
with open("tests/requirements.txt", "r", "utf-8") as f:
test_requirements = f.read()
# NOTE(review): install_requires/tests_require receive the raw file contents
# (one newline-separated string) rather than a list of requirement strings;
# setuptools tolerates this, but splitlines() is the conventional form --
# confirm before changing.
setup(
    name="certbot-dns-dnspod",
    version=__version__,
    description="DNSPOD DNS Authenticator plugin for Certbot",
    long_description=readme,
    author=__author__,
    author_email=__email__,
    url=__url__,
    license="Apache License 2.0",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Plugins",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Security",
        "Topic :: System :: Installation/Setup",
        "Topic :: System :: Networking",
        "Topic :: System :: Systems Administration",
        "Topic :: Utilities",
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requirements,
    tests_require=test_requirements,
    entry_points={
        "certbot.plugins": [
            "dns-dnspod = certbot_dns_dnspod.dns_dnspod:Authenticator",
        ],
    },
    test_suite="certbot_dns_dnspod",
)
| {
"content_hash": "b59eba7ade05b0aae47466af9604a479",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 71,
"avg_line_length": 32.266666666666666,
"alnum_prop": 0.5945247933884298,
"repo_name": "SkyLothar/certbot-dns-dnspod",
"id": "c77e50ae0b5747db6f1384d3ebc72d74746e590b",
"size": "1936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16761"
}
],
"symlink_target": ""
} |
import json
import urllib2
import urlparse
import ssl
def dcat_to_utf8_dict(url):
    """Fetch a DCAT JSON document from *url* and return it as a dict.

    SECURITY NOTE(review): certificate verification is disabled below, which
    permits man-in-the-middle attacks -- confirm this is intentional (e.g.
    for portals with self-signed certificates).
    """
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    return json.loads(urllib2.urlopen(url, context=ctx).read().decode('utf-8'))
def get_extension_from_url(u):
# SEE: https://docs.python.org/2/library/urlparse.html
path = urlparse(u)[2]
return path.split('.')[-1].strip().lower()
| {
"content_hash": "034250ae0a34783102b1612dff2543c3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 79,
"avg_line_length": 25.58823529411765,
"alnum_prop": 0.6827586206896552,
"repo_name": "mxabierto/ckanops",
"id": "c0f9cc2034296f89747612701d4e1b136157a316",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckanops/utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1696"
},
{
"name": "Python",
"bytes": "25690"
}
],
"symlink_target": ""
} |
import abc
from neutron.api.v2 import attributes
from oslo_log import log as logging
import six
from sqlalchemy.orm import exc as orm_exc
from gbpservice.common import utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class PolicyTargetContext(object):
    """Context passed to policy engine for policy_target resource changes.

    A PolicyTargetContext instance wraps a policy_target resource. It provides
    helper methods for accessing other relevant information. Results
    from expensive operations are cached for convenient access.
    """

    # Pure interface: concrete contexts are supplied by the plugin; drivers
    # only consume these members. (six keeps the metaclass Py2/Py3 portable.)

    @abc.abstractproperty
    def current(self):
        """Return the current state of the policy_target.

        Return the current state of the policy_target, as defined by
        GroupPolicyPlugin.create_policy_target.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the policy_target.

        Return the original state of the policy_target, prior to a call to
        update_policy_target. Method is only valid within calls to
        update_policy_target_precommit and update_policy_target_postcommit.
        """
        pass

    @abc.abstractmethod
    def set_port_id(self, port_id):
        """Set the port for the policy_target.

        :param port_id: Port to which policy_target is mapped.

        Set the neutron port to which the policy_target is mapped.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class PolicyTargetGroupContext(object):
    """Context passed to policy engine for policy_target_group resource changes.

    PolicyTargetContext instance wraps a policy_target_group resource. It
    provides helper methods for accessing other relevant information. Results
    from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the policy_target_group.

        Return the current state of the policy_target_group, as defined by
        GroupPolicyPlugin.create_policy_target_group.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the policy_target_group.

        Return the original state of the policy_target_group, prior to a call
        to update_policy_target_group. Method is only valid within calls to
        update_policy_target_group_precommit and
        update_policy_target_group_postcommit.
        """
        pass

    @abc.abstractmethod
    def set_l2_policy_id(self, l2_policy_id):
        """Set the l2_policy for the policy_target_group.

        :param l2_policy_id: l2_policy for the policy_target_group.

        Set the l2_policy for the policy_target_group.
        """
        pass

    @abc.abstractmethod
    def set_network_service_policy_id(self, network_service_policy_id):
        """Set the network_service_policy for the policy_target_group.

        :param network_service_policy_id: network_service_policy for the ptg.

        Set the network_service_policy for the policy_target_group.
        """
        pass

    @abc.abstractmethod
    def add_subnet(self, subnet_id):
        """Add the subnet to the policy_target_group.

        :param subnet_id: Subnet to which policy_target_group is mapped.

        Add a neutron subnet to the set of subnets to which the
        policy_target_group is mapped.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class L2PolicyContext(object):
    """Context passed to policy engine for l2_policy resource changes.

    A L2_ContextContext instance wraps an l2_policy resource. It provides
    helper methods for accessing other relevant information. Results
    from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the l2_policy.

        Return the current state of the l2_policy, as defined by
        GroupPolicyPlugin.create_l2_policy.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the l2_policy.

        Return the original state of the l2_policy, prior to a call to
        update_l2_policy. Method is only valid within calls to
        update_l2_policy_precommit and update_l2_policy_postcommit.
        """
        pass

    @abc.abstractmethod
    def set_l3_policy_id(self, l3_policy_id):
        """Set the l3_policy for the l2_policy.

        :param l3_policy_id: l3_policy for the l2_policy.

        Set the l3_policy for the l2_policy.
        """
        pass

    @abc.abstractmethod
    def set_network_id(self, network_id):
        """Set the network for the l2_policy.

        :param network_id: Network to which l2_policy is mapped.

        Set the neutron network to which the l2_policy is mapped.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class L3PolicyContext(object):
    """Context passed to policy engine for l3_policy resource changes.

    A L3PolicyContext instance wraps an l3_policy resource.
    It provides helper methods for accessing other relevant information.
    Results from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the l3_policy.

        Return the current state of the l3_policy, as defined by
        GroupPolicyPlugin.create_l3_policy.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the l3_policy.

        Return the original state of the l3_policy, prior to a call to
        update_l3_policy. Method is only valid within calls to
        update_l3_policy_precommit and update_l3_policy_postcommit.
        """
        pass

    @abc.abstractmethod
    def add_router(self, router_id):
        """Add the router to the l3_policy.

        :param router_id: Router to which l3_policy is mapped.

        Add a neutron router to the set of routers to which the
        l3_policy is mapped.
        """
        pass

    @abc.abstractmethod
    def set_external_fixed_ips(self, external_segment_id, ips):
        """Add the external_fixed_ips to the l3_policy.

        :param external_segment_id: ES to which l3_policy is mapped.
        :param ips: IPs assigned for that ES.
        """
        pass

    @abc.abstractmethod
    def set_external_segment(self, external_segment_id):
        """Add the external_segment to the l3_policy.

        :param external_segment_id: ES to which l3_policy is mapped.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class NetworkServicePolicyContext(object):
    """
    Context passed to policy engine for network_service_policy resource
    changes.

    A NetworkServicePolicyContext instance wraps a network_service_policy
    resource. It provides helper methods for accessing other relevant
    information. Results from expensive operations are cached for convenient
    access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the network_service_policy.

        Return the current state of the network_service_policy, as defined by
        GroupPolicyPlugin.create_network_service_policy.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the network_service_policy.

        Return the original state of the network_service_policy, prior to a
        call to
        update_network_service_policy. Method is only valid within calls to
        update_network_service_policy_precommit and
        update_network_service_policy_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class PolicyClassifierContext(object):
    """Context passed to policy engine for policy_classifier resource changes.

    An PolicyClassifierContext instance wraps a policy_classifier resource.
    It provides helper methods for accessing other relevant information.
    Results from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the policy_classifier.

        Return the current state of the policy_classifier, as defined by
        GroupPolicyPlugin.create_policy_classifier.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the policy_classifier.

        Return the original state of the policy_classifier, prior to a call to
        update_policy_classifier. Method is only valid within calls to
        update_policy_classifier_precommit and
        update_policy_classifier_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class PolicyActionContext(object):
    """Context passed to policy engine for policy_action resource changes.

    An PolicyActionContext instance wraps a policy_action resource.
    It provides helper methods for accessing other relevant information.
    Results from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the policy_action.

        Return the current state of the policy_action, as defined by
        GroupPolicyPlugin.create_policy_action.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the policy_action.

        Return the original state of the policy_action, prior to a call to
        update_policy_action. Method is only valid within calls to
        update_policy_action_precommit and update_policy_action_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class PolicyRuleContext(object):
    """Context passed to policy engine for policy_rule resource changes.

    An PolicyRuleContext instance wraps a policy_rule resource.
    It provides helper methods for accessing other relevant information.
    Results from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the policy_rule.

        Return the current state of the policy_rule, as defined by
        GroupPolicyPlugin.create_policy_rule.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the policy_rule.

        Return the original state of the policy_rule, prior to a call to
        update_policy_rule. Method is only valid within calls to
        update_policy_rule_precommit and
        update_policy_rule_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class PolicyRuleSetContext(object):
    """Context passed to policy engine for changes to policy_rule_set resources.

    PolicyRuleSetContext instance wraps a policy_rule_set resource. It
    provides helper methods for accessing other relevant information. Results
    from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the policy_rule_set.

        Return the current state of the policy_rule_set, as defined by
        GroupPolicyPlugin.create_policy_rule_set.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the policy_rule_set.

        Return the original state of the policy_rule_set, prior to a call to
        update_policy_rule_set. Method is only valid within calls to
        update_policy_rule_set_precommit and update_policy_rule_set_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class ExternalSegmentContext(object):
    """Context passed to policy engine for external_segment resource.

    A ExternalSegmentContext instance wraps an external_segment
    resource.
    It provides helper methods for accessing other relevant information.
    Results from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the external_segment.

        Return the current state of the external_segment, as defined by
        GroupPolicyPlugin.create_external_segment.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the external_segment.

        Return the original state of the external_segment, prior to a
        call to update_external_segment. Method is only valid within
        calls to update_external_segment_precommit and
        update_external_segment_postcommit.
        """
        pass

    @abc.abstractmethod
    def add_subnet(self, subnet_id):
        """Add the subnet to the external_segment.

        :param subnet_id: Subnet to which external_segment is mapped.

        Add a neutron subnet to the set of routers to which the
        external_segment is mapped.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class ExternalPolicyContext(object):
    """Context passed to policy engine for external_policy resource.

    A ExternalPolicyContext instance wraps an external_policy
    resource.
    It provides helper methods for accessing other relevant information.
    Results from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the external_policy.

        Return the current state of the external_policy, as defined by
        GroupPolicyPlugin.create_external_policy.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the external_policy.

        Return the original state of the external_policy, prior to a
        call to update_external_policy. Method is only valid within
        calls to update_external_policy_precommit and
        update_external_policy_postcommit.
        """
        pass

    @abc.abstractmethod
    def set_external_segment(self, external_segment_id):
        """Add the external_segment to the external_policy.

        :param external_segment_id: ES to which external_policy is mapped.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class NatPoolContext(object):
    """Context passed to policy engine for nat_pool resource.

    A NatPoolContext instance wraps an nat_pool
    resource.
    It provides helper methods for accessing other relevant information.
    Results from expensive operations are cached for convenient access.
    """

    # Pure interface consumed by policy drivers; implemented by the plugin.

    @abc.abstractproperty
    def current(self):
        """Return the current state of the nat_pool.

        Return the current state of the nat_pool, as defined by
        GroupPolicyPlugin.create_nat_pool.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the original state of the nat_pool.

        Return the original state of the nat_pool, prior to a
        call to update_nat_pool. Method is only valid within
        calls to update_nat_pool_precommit and
        update_nat_pool_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class PolicyDriver(object):
    """Define stable abstract interface for Group Policy drivers.

    A policy driver is called on the creation, update, and deletion
    of all Group Policy resources. For every event, there are two methods that
    get called - one within the database transaction (method suffix of
    _precommit), one right afterwards (method suffix of _postcommit).

    Exceptions raised by methods called inside the transaction can
    rollback, but should not make any blocking calls (for example,
    REST requests to an outside controller). Methods called after
    transaction commits can make blocking external calls, though these
    will block the entire process. Exceptions raised in calls after
    the transaction commits may cause the associated resource to be
    deleted.

    Because rollback outside of the transaction is not done in the
    case of update of resources, all data validation must be done within
    methods that are part of the database transaction.
    """

    @abc.abstractmethod
    def initialize(self):
        """Perform driver initialization.

        Called after all drivers have been loaded and the database has
        been initialized. No abstract methods defined below will be
        called prior to this method being called.
        """
        pass

    def create_policy_target_precommit(self, context):
        """Allocate resources for a new policy_target.

        :param context: PolicyTargetContext instance describing the new
            policy_target.
        """
        pass

    def create_policy_target_postcommit(self, context):
        """Create a policy_target.

        :param context: PolicyTargetContext instance describing the new
            policy_target.
        """
        pass

    def update_policy_target_precommit(self, context):
        """Update resources of a policy_target.

        :param context: PolicyTargetContext instance describing the new
            state of the policy_target, as well as the original state prior
            to the update_policy_target call.
        """
        pass

    def update_policy_target_postcommit(self, context):
        """Update a policy_target.

        :param context: PolicyTargetContext instance describing the new
            state of the policy_target, as well as the original state prior
            to the update_policy_target call.
        """
        pass

    def delete_policy_target_precommit(self, context):
        """Delete resources for a policy_target.

        :param context: PolicyTargetContext instance describing the current
            state of the policy_target, prior to the call to delete it.
        """
        pass

    def delete_policy_target_postcommit(self, context):
        """Delete a policy_target.

        :param context: PolicyTargetContext instance describing the current
            state of the policy_target, prior to the call to delete it.
        """
        pass

    def create_policy_target_group_precommit(self, context):
        """Allocate resources for a new policy_target_group.

        :param context: PolicyTargetGroupContext instance describing the new
            policy_target_group.
        """
        pass

    def create_policy_target_group_postcommit(self, context):
        """Create a policy_target_group.

        :param context: PolicyTargetGroupContext instance describing the new
            policy_target_group.
        """
        pass

    def update_policy_target_group_precommit(self, context):
        """Update resources of a policy_target_group.

        :param context: PolicyTargetGroupContext instance describing the new
            state of the policy_target_group, as well as the original state
            prior to the update_policy_target_group call.
        """
        pass

    def update_policy_target_group_postcommit(self, context):
        """Update a policy_target_group.

        :param context: PolicyTargetGroupContext instance describing the new
            state of the policy_target_group, as well as the original state
            prior to the update_policy_target_group call.
        """
        pass

    def delete_policy_target_group_precommit(self, context):
        """Delete resources for a policy_target_group.

        :param context: PolicyTargetGroupContext instance describing the
            current state of the policy_target_group, prior to the call to
            delete it.
        """
        pass

    def delete_policy_target_group_postcommit(self, context):
        """Delete a policy_target_group.

        :param context: PolicyTargetGroupContext instance describing the
            current state of the policy_target_group, prior to the call to
            delete it.
        """
        pass

    def create_l2_policy_precommit(self, context):
        """Allocate resources for a new l2_policy.

        :param context: L2PolicyContext instance describing the new
            l2_policy.
        """
        pass

    def create_l2_policy_postcommit(self, context):
        """Create a l2_policy.

        :param context: L2PolicyContext instance describing the new
            l2_policy.
        """
        pass

    def update_l2_policy_precommit(self, context):
        """Update resources of a l2_policy.

        :param context: L2PolicyContext instance describing the new
            state of the l2_policy, as well as the original state prior
            to the update_l2_policy call.
        """
        pass

    def update_l2_policy_postcommit(self, context):
        """Update a l2_policy.

        :param context: L2PolicyContext instance describing the new
            state of the l2_policy, as well as the original state prior
            to the update_l2_policy call.
        """
        pass

    def delete_l2_policy_precommit(self, context):
        """Delete resources for a l2_policy.

        :param context: L2PolicyContext instance describing the current
            state of the l2_policy, prior to the call to delete it.
        """
        pass

    def delete_l2_policy_postcommit(self, context):
        """Delete a l2_policy.

        :param context: L2PolicyContext instance describing the current
            state of the l2_policy, prior to the call to delete it.
        """
        pass

    def create_l3_policy_precommit(self, context):
        """Allocate resources for a new l3_policy.

        :param context: L3PolicyContext instance describing the new
            l3_policy.
        """
        pass

    def create_l3_policy_postcommit(self, context):
        """Create a l3_policy.

        :param context: L3PolicyContext instance describing the new
            l3_policy.
        """
        pass

    def update_l3_policy_precommit(self, context):
        """Update resources of a l3_policy.

        :param context: L3PolicyContext instance describing the new
            state of the l3_policy, as well as the original state prior
            to the update_l3_policy call.
        """
        pass

    def update_l3_policy_postcommit(self, context):
        """Update a l3_policy.

        :param context: L3PolicyContext instance describing the new
            state of the l3_policy, as well as the original state prior
            to the update_l3_policy call.
        """
        pass

    def delete_l3_policy_precommit(self, context):
        """Delete resources for a l3_policy.

        :param context: L3PolicyContext instance describing the current
            state of the l3_policy, prior to the call to delete it.
        """
        pass

    def delete_l3_policy_postcommit(self, context):
        """Delete a l3_policy.

        :param context: L3PolicyContext instance describing the current
            state of the l3_policy, prior to the call to delete it.
        """
        pass

    def create_policy_classifier_precommit(self, context):
        """Allocate resources for a new policy_classifier.

        :param context: PolicyClassifierContext instance describing the new
            policy_classifier.
        """
        pass

    def create_policy_classifier_postcommit(self, context):
        """Create a policy_classifier.

        :param context: PolicyClassifierContext instance describing the new
            policy_classifier.
        """
        pass

    def update_policy_classifier_precommit(self, context):
        """Update resources of a policy_classifier.

        :param context: PolicyClassifierContext instance describing the new
            state of the policy_classifier, as well as the original state
            prior to the update_policy_classifier call.
        """
        pass

    def update_policy_classifier_postcommit(self, context):
        """Update a policy_classifier.

        :param context: PolicyClassifierContext instance describing the new
            state of the policy_classifier, as well as the original state
            prior to the update_policy_classifier call.
        """
        pass

    def delete_policy_classifier_precommit(self, context):
        """Delete resources for a policy_classifier.

        :param context: PolicyClassifierContext instance describing the
            current state of the policy_classifier, prior to the call to
            delete it.
        """
        pass

    def delete_policy_classifier_postcommit(self, context):
        """Delete a policy_classifier.

        :param context: PolicyClassifierContext instance describing the
            current state of the policy_classifier, prior to the call to
            delete it.
        """
        pass

    def create_policy_action_precommit(self, context):
        """Allocate resources for a new policy_action.

        :param context: PolicyActionContext instance describing the new
            policy_action.
        """
        pass

    def create_policy_action_postcommit(self, context):
        """Create a policy_action.

        :param context: PolicyActionContext instance describing the new
            policy_action.
        """
        pass

    def update_policy_action_precommit(self, context):
        """Update resources of a policy_action.

        :param context: PolicyActionContext instance describing the new
            state of the policy_action, as well as the original state prior
            to the update_policy_action call.
        """
        pass

    def update_policy_action_postcommit(self, context):
        """Update a policy_action.

        :param context: PolicyActionContext instance describing the new
            state of the policy_action, as well as the original state prior
            to the update_policy_action call.
        """
        pass

    def delete_policy_action_precommit(self, context):
        """Delete resources for a policy_action.

        :param context: PolicyActionContext instance describing the current
            state of the policy_action, prior to the call to delete it.
        """
        pass

    def delete_policy_action_postcommit(self, context):
        """Delete a policy_action.

        :param context: PolicyActionContext instance describing the current
            state of the policy_action, prior to the call to delete it.
        """
        pass

    def create_policy_rule_precommit(self, context):
        """Allocate resources for a new policy_rule.

        :param context: PolicyRuleContext instance describing the new
            policy_rule.
        """
        pass

    def create_policy_rule_postcommit(self, context):
        """Create a policy_rule.

        :param context: PolicyRuleContext instance describing the new
            policy_rule.
        """
        pass

    def update_policy_rule_precommit(self, context):
        """Update resources of a policy_rule.

        :param context: PolicyRuleContext instance describing the new
            state of the policy_rule, as well as the original state prior
            to the update_policy_rule call.
        """
        pass

    def update_policy_rule_postcommit(self, context):
        """Update a policy_rule.

        :param context: PolicyRuleContext instance describing the new
            state of the policy_rule, as well as the original state prior
            to the update_policy_rule call.
        """
        pass

    def delete_policy_rule_precommit(self, context):
        """Delete resources for a policy_rule.

        :param context: PolicyRuleContext instance describing the current
            state of the policy_rule, prior to the call to delete it.
        """
        pass

    def delete_policy_rule_postcommit(self, context):
        """Delete a policy_rule.

        :param context: PolicyRuleContext instance describing the current
            state of the policy_rule, prior to the call to delete it.
        """
        pass

    def create_policy_rule_set_precommit(self, context):
        """Allocate resources for a new policy_rule_set.

        :param context: PolicyRuleSetContext instance describing the new
            policy_rule_set.
        """
        pass

    def create_policy_rule_set_postcommit(self, context):
        """Create a policy_rule_set.

        :param context: PolicyRuleSetContext instance describing the new
            policy_rule_set.
        """
        pass

    def update_policy_rule_set_precommit(self, context):
        """Update resources of a policy_rule_set.

        :param context: PolicyRuleSetContext instance describing the new
            state of the policy_rule_set, as well as the original state
            prior to the update_policy_rule_set call.
        """
        pass

    def update_policy_rule_set_postcommit(self, context):
        """Update a policy_rule_set.

        :param context: PolicyRuleSetContext instance describing the new
            state of the policy_rule_set, as well as the original state
            prior to the update_policy_rule_set call.
        """
        pass

    def delete_policy_rule_set_precommit(self, context):
        """Delete resources for a policy_rule_set.

        :param context: PolicyRuleSetContext instance describing the current
            state of the policy_rule_set, prior to the call to delete it.
        """
        pass

    def delete_policy_rule_set_postcommit(self, context):
        """Delete a policy_rule_set.

        :param context: PolicyRuleSetContext instance describing the current
            state of the policy_rule_set, prior to the call to delete it.
        """
        pass

    def create_network_service_policy_precommit(self, context):
        """Allocate resources for a new network service policy.

        :param context: NetworkServicePolicyContext instance describing the
            new network service policy.
        """
        pass

    def create_network_service_policy_postcommit(self, context):
        """Create a network service policy.

        :param context: NetworkServicePolicyContext instance describing the
            new network service policy.
        """
        pass

    def update_network_service_policy_precommit(self, context):
        """Update resources of a network service policy.

        :param context: NetworkServicePolicyContext instance describing the
            new state of the NetworkServicePolicy, as well as the original
            state prior to the update_network_service_policy call.
        """
        pass

    def update_network_service_policy_postcommit(self, context):
        """Update a network service policy.

        :param context: NetworkServicePolicyContext instance describing the
            new state of the NetworkServicePolicy, as well as the original
            state prior to the update_network_service_policy call.
        """
        pass

    def delete_network_service_policy_precommit(self, context):
        """Delete resources for a network service policy.

        :param context: NetworkServicePolicyContext instance describing the
            current state of the NetworkServicePolicy, prior to the call to
            delete it.
        """
        pass

    def delete_network_service_policy_postcommit(self, context):
        """Delete a network service policy.

        :param context: NetworkServicePolicyContext instance describing the
            current state of the NetworkServicePolicy, prior to the call to
            delete it.
        """
        pass

    # NOTE: the docstrings of the external_segment, external_policy and
    # nat_pool hooks below previously said "network service policy" — a
    # copy-paste error; they are corrected to name the actual resource.

    def create_external_segment_precommit(self, context):
        """Allocate resources for a new external_segment.

        :param context: ExternalSegmentContext instance describing the
            new external_segment.
        """
        pass

    def create_external_segment_postcommit(self, context):
        """Create an external_segment.

        :param context: ExternalSegmentContext instance describing the
            new external_segment.
        """
        pass

    def update_external_segment_precommit(self, context):
        """Update resources of an external_segment.

        :param context: ExternalSegmentContext instance describing the
            new state of the ExternalSegment, as well as the original state
            prior to the update_external_segment call.
        """
        pass

    def update_external_segment_postcommit(self, context):
        """Update an external_segment.

        :param context: ExternalSegmentContext instance describing the
            new state of the ExternalSegment, as well as the original state
            prior to the update_external_segment call.
        """
        pass

    def delete_external_segment_precommit(self, context):
        """Delete resources for an external_segment.

        :param context: ExternalSegmentContext instance describing the
            current state of the ExternalSegment, prior to the call to
            delete it.
        """
        pass

    def delete_external_segment_postcommit(self, context):
        """Delete an external_segment.

        :param context: ExternalSegmentContext instance describing the
            current state of the ExternalSegment, prior to the call to
            delete it.
        """
        pass

    def create_external_policy_precommit(self, context):
        """Allocate resources for a new external_policy.

        :param context: ExternalPolicyContext instance describing the
            new external_policy.
        """
        pass

    def create_external_policy_postcommit(self, context):
        """Create an external_policy.

        :param context: ExternalPolicyContext instance describing the
            new external_policy.
        """
        pass

    def update_external_policy_precommit(self, context):
        """Update resources of an external_policy.

        :param context: ExternalPolicyContext instance describing the
            new state of the ExternalPolicy, as well as the original state
            prior to the update_external_policy call.
        """
        pass

    def update_external_policy_postcommit(self, context):
        """Update an external_policy.

        :param context: ExternalPolicyContext instance describing the
            new state of the ExternalPolicy, as well as the original state
            prior to the update_external_policy call.
        """
        pass

    def delete_external_policy_precommit(self, context):
        """Delete resources for an external_policy.

        :param context: ExternalPolicyContext instance describing the
            current state of the ExternalPolicy, prior to the call to
            delete it.
        """
        pass

    def delete_external_policy_postcommit(self, context):
        """Delete an external_policy.

        :param context: ExternalPolicyContext instance describing the
            current state of the ExternalPolicy, prior to the call to
            delete it.
        """
        pass

    def create_nat_pool_precommit(self, context):
        """Allocate resources for a new nat_pool.

        :param context: NatPoolContext instance describing the
            new nat_pool.
        """
        pass

    def create_nat_pool_postcommit(self, context):
        """Create a nat_pool.

        :param context: NatPoolContext instance describing the
            new nat_pool.
        """
        pass

    def update_nat_pool_precommit(self, context):
        """Update resources of a nat_pool.

        :param context: NatPoolContext instance describing the
            new state of the NatPool, as well as the original state
            prior to the update_nat_pool call.
        """
        pass

    def update_nat_pool_postcommit(self, context):
        """Update a nat_pool.

        :param context: NatPoolContext instance describing the
            new state of the NatPool, as well as the original state
            prior to the update_nat_pool call.
        """
        pass

    def delete_nat_pool_precommit(self, context):
        """Delete resources for a nat_pool.

        :param context: NatPoolContext instance describing the
            current state of the NatPool, prior to the call to
            delete it.
        """
        pass

    def delete_nat_pool_postcommit(self, context):
        """Delete a nat_pool.

        :param context: NatPoolContext instance describing the
            current state of the NatPool, prior to the call to
            delete it.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class ExtensionDriver(object):
"""Define stable abstract interface for Group Policy extension drivers.
An extension driver extends the core resources implemented by the
group policy service plugin with additional attributes. Methods
that process create and update operations for these resources
validate and persist values for extended attributes supplied
through the API. Other methods extend the resource dictionaries
returned from the API operations with the values of the extended
attributes.
"""
@abc.abstractmethod
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
pass
@abc.abstractproperty
def extension_alias(self):
"""Supported extension alias.
Return the alias identifying the Group Policy API extension
supported by this driver.
"""
pass
def process_create_policy_target(self, session, data, result):
"""Process extended attributes for policy_target creation.
:param session: database session
:param data: dictionary of incoming policy_target data
:param result: policy_target dictionary to extend
Called inside transaction context on session to validate and
persist any extended policy_target attributes defined by this
driver. Extended attribute values must also be added to
result.
"""
pass
def process_update_policy_target(self, session, data, result):
"""Process extended attributes for policy_target update.
:param session: database session
:param data: dictionary of incoming policy_target data
:param result: policy_target dictionary to extend
Called inside transaction context on session to validate and
update any extended policy_target attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def extend_policy_target_dict(self, session, result):
"""Add extended attributes to policy_target dictionary.
:param session: database session
:param result: policy_target dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a policy_target
dictionary to be used for mechanism driver calls and/or
returned as the result of a policy_target operation.
"""
pass
def process_create_policy_target_group(self, session, data, result):
"""Process extended attributes for policy_target_group creation.
:param session: database session
:param data: dictionary of incoming policy_target_group data
:param result: policy_target_group dictionary to extend
Called inside transaction context on session to validate and
persist any extended policy_target_group attributes defined by
this driver. Extended attribute values must also be added to
result.
"""
pass
def process_update_policy_target_group(self, session, data, result):
"""Process extended attributes for policy_target_group update.
:param session: database session
:param data: dictionary of incoming policy_target_group data
:param result: policy_target_group dictionary to extend
Called inside transaction context on session to validate and
update any extended policy_target_group attributes defined by
this driver. Extended attribute values, whether updated or
not, must also be added to result.
"""
pass
def extend_policy_target_group_dict(self, session, result):
"""Add extended attributes to policy_target_group dictionary.
:param session: database session
:param result: policy_target_group dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a
policy_target_group dictionary to be used for mechanism driver
calls and/or returned as the result of a policy_target_group
operation.
"""
pass
def process_create_l2_policy(self, session, data, result):
"""Process extended attributes for l2_policy creation.
:param session: database session
:param data: dictionary of incoming l2_policy data
:param result: l2_policy dictionary to extend
Called inside transaction context on session to validate and
persist any extended l2_policy attributes defined by this
driver. Extended attribute values must also be added to
result.
"""
pass
def process_update_l2_policy(self, session, data, result):
"""Process extended attributes for l2_policy update.
:param session: database session
:param data: dictionary of incoming l2_policy data
:param result: l2_policy dictionary to extend
Called inside transaction context on session to validate and
update any extended l2_policy attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def extend_l2_policy_dict(self, session, result):
"""Add extended attributes to l2_policy dictionary.
:param session: database session
:param result: l2_policy dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a l2_policy
dictionary to be used for mechanism driver calls and/or
returned as the result of a l2_policy operation.
"""
pass
def process_create_l3_policy(self, session, data, result):
"""Process extended attributes for l3_policy creation.
:param session: database session
:param data: dictionary of incoming l3_policy data
:param result: l3_policy dictionary to extend
Called inside transaction context on session to validate and
persist any extended l3_policy attributes defined by this
driver. Extended attribute values must also be added to
result.
"""
pass
def process_update_l3_policy(self, session, data, result):
"""Process extended attributes for l3_policy update.
:param session: database session
:param data: dictionary of incoming l3_policy data
:param result: l3_policy dictionary to extend
Called inside transaction context on session to validate and
update any extended l3_policy attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def extend_l3_policy_dict(self, session, result):
"""Add extended attributes to l3_policy dictionary.
:param session: database session
:param result: l3_policy dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a l3_policy
dictionary to be used for mechanism driver calls and/or
returned as the result of a l3_policy operation.
"""
pass
def process_create_policy_classifier(self, session, data, result):
"""Process extended attributes for policy_classifier creation.
:param session: database session
:param data: dictionary of incoming policy_classifier data
:param result: policy_classifier dictionary to extend
Called inside transaction context on session to validate and
persist any extended policy_classifier attributes defined by
this driver. Extended attribute values must also be added to
result.
"""
pass
def process_update_policy_classifier(self, session, data, result):
"""Process extended attributes for policy_classifier update.
:param session: database session
:param data: dictionary of incoming policy_classifier data
:param result: policy_classifier dictionary to extend
Called inside transaction context on session to validate and
update any extended policy_classifier attributes defined by
this driver. Extended attribute values, whether updated or
not, must also be added to result.
"""
pass
def extend_policy_classifier_dict(self, session, result):
"""Add extended attributes to policy_classifier dictionary.
:param session: database session
:param result: policy_classifier dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a
policy_classifier dictionary to be used for mechanism driver
calls and/or returned as the result of a policy_classifier
operation.
"""
pass
def process_create_policy_action(self, session, data, result):
"""Process extended attributes for policy_action creation.
:param session: database session
:param data: dictionary of incoming policy_action data
:param result: policy_action dictionary to extend
Called inside transaction context on session to validate and
persist any extended policy_action attributes defined by this
driver. Extended attribute values must also be added to
result.
"""
pass
def process_update_policy_action(self, session, data, result):
"""Process extended attributes for policy_action update.
:param session: database session
:param data: dictionary of incoming policy_action data
:param result: policy_action dictionary to extend
Called inside transaction context on session to validate and
update any extended policy_action attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def extend_policy_action_dict(self, session, result):
"""Add extended attributes to policy_action dictionary.
:param session: database session
:param result: policy_action dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a policy_action
dictionary to be used for mechanism driver calls and/or
returned as the result of a policy_action operation.
"""
pass
def process_create_policy_rule(self, session, data, result):
"""Process extended attributes for policy_rule creation.
:param session: database session
:param data: dictionary of incoming policy_rule data
:param result: policy_rule dictionary to extend
Called inside transaction context on session to validate and
persist any extended policy_rule attributes defined by this
driver. Extended attribute values must also be added to
result.
"""
pass
def process_update_policy_rule(self, session, data, result):
"""Process extended attributes for policy_rule update.
:param session: database session
:param data: dictionary of incoming policy_rule data
:param result: policy_rule dictionary to extend
Called inside transaction context on session to validate and
update any extended policy_rule attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def extend_policy_rule_dict(self, session, result):
"""Add extended attributes to policy_rule dictionary.
:param session: database session
:param result: policy_rule dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a policy_rule
dictionary to be used for mechanism driver calls and/or
returned as the result of a policy_rule operation.
"""
pass
def process_create_policy_rule_set(self, session, data, result):
"""Process extended attributes for policy_rule_set creation.
:param session: database session
:param data: dictionary of incoming policy_rule_set data
:param result: policy_rule_set dictionary to extend
Called inside transaction context on session to validate and
persist any extended policy_rule_set attributes defined by
this driver. Extended attribute values must also be added to
result.
"""
pass
def process_update_policy_rule_set(self, session, data, result):
"""Process extended attributes for policy_rule_set update.
:param session: database session
:param data: dictionary of incoming policy_rule_set data
:param result: policy_rule_set dictionary to extend
Called inside transaction context on session to validate and
update any extended policy_rule_set attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def extend_policy_rule_set_dict(self, session, result):
"""Add extended attributes to policy_rule_set dictionary.
:param session: database session
:param result: policy_rule_set dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a
policy_rule_set dictionary to be used for mechanism driver
calls and/or returned as the result of a policy_rule_set
operation.
"""
pass
def process_create_network_service_policy(self, session, data, result):
"""Process extended attributes for network_service_policy creation.
:param session: database session
:param data: dictionary of incoming network_service_policy data
:param result: network_service_policy dictionary to extend
Called inside transaction context on session to validate and
persist any extended network_service_policy attributes defined
by this driver. Extended attribute values must also be added
to result.
"""
pass
def process_update_network_service_policy(self, session, data, result):
"""Process extended attributes for network_service_policy update.
:param session: database session
:param data: dictionary of incoming network_service_policy data
:param result: network_service_policy dictionary to extend
Called inside transaction context on session to validate and
update any extended network_service_policy attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def extend_network_service_policy_dict(self, session, result):
"""Add extended attributes to network_service_policy dictionary.
:param session: database session
:param result: network_service_policy dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a
network_service_policy dictionary to be used for mechanism
driver calls and/or returned as the result of a
network_service_policy operation.
"""
pass
def process_create_external_segment(self, session, data, result):
"""Process extended attributes for external_segment creation.
:param session: database session
:param data: dictionary of incoming external_segment data
:param result: external_segment dictionary to extend
Called inside transaction context on session to validate and
persist any extended external_segment attributes defined
by this driver. Extended attribute values must also be added
to result.
"""
pass
def process_update_external_segment(self, session, data, result):
"""Process extended attributes for external_segment update.
:param session: database session
:param data: dictionary of incoming external_segment data
:param result: external_segment dictionary to extend
Called inside transaction context on session to validate and
update any extended external_segment attributes defined by this
driver. Extended attribute values, whether updated or not,
must also be added to result.
"""
pass
def extend_external_segment_dict(self, session, result):
"""Add extended attributes to external_segment dictionary.
:param session: database session
:param result: external_segment dictionary to extend
Called inside transaction context on session to add any
extended attributes defined by this driver to a
external_segment dictionary to be used for mechanism
driver calls and/or returned as the result of a
external_segment operation.
"""
pass
def process_create_external_policy(self, session, data, result):
    """Process extended attributes for external_policy creation.

    :param session: database session
    :param data: dictionary of incoming external_policy data
    :param result: external_policy dictionary to extend

    Runs inside the session's transaction context; the driver should
    validate and persist any extended external_policy attributes it
    defines, and must also mirror their values into result.  Default
    implementation is a no-op.
    """
    pass
def process_update_external_policy(self, session, data, result):
    """Process extended attributes for external_policy update.

    :param session: database session
    :param data: dictionary of incoming external_policy data
    :param result: external_policy dictionary to extend

    Runs inside the session's transaction context; the driver should
    validate and update any extended external_policy attributes it
    defines.  Extended attribute values, whether updated or not, must
    also be mirrored into result.  Default implementation is a no-op.
    """
    pass
def extend_external_policy_dict(self, session, result):
    """Add extended attributes to an external_policy dictionary.

    :param session: database session
    :param result: external_policy dictionary to extend

    Invoked inside the session's transaction context so the driver can
    inject any extended attributes it defines into the dictionary used
    for mechanism driver calls and/or returned as the result of an
    external_policy operation.  Default implementation is a no-op.
    """
    pass
def process_create_nat_pool(self, session, data, result):
    """Process extended attributes for nat_pool creation.

    :param session: database session
    :param data: dictionary of incoming nat_pool data
    :param result: nat_pool dictionary to extend

    Runs inside the session's transaction context; the driver should
    validate and persist any extended nat_pool attributes it defines,
    and must also mirror their values into result.  Default
    implementation is a no-op.
    """
    pass
def process_update_nat_pool(self, session, data, result):
    """Process extended attributes for nat_pool update.

    :param session: database session
    :param data: dictionary of incoming nat_pool data
    :param result: nat_pool dictionary to extend

    Runs inside the session's transaction context; the driver should
    validate and update any extended nat_pool attributes it defines.
    Extended attribute values, whether updated or not, must also be
    mirrored into result.  Default implementation is a no-op.
    """
    pass
def extend_nat_pool_dict(self, session, result):
    """Add extended attributes to a nat_pool dictionary.

    :param session: database session
    :param result: nat_pool dictionary to extend

    Invoked inside the session's transaction context so the driver can
    inject any extended attributes it defines into the dictionary used
    for mechanism driver calls and/or returned as the result of a
    nat_pool operation.  Default implementation is a no-op.
    """
    pass
def _default_process_create(self, session, data, result, type=None,
                            table=None, keys=None):
    """Default creation behavior for extension attributes.

    Persists the driver-defined attribute values from *data* into
    *table* and mirrors them into *result*, so drivers do not have to
    duplicate this boilerplate.  Call once per extension table if a
    driver spans several tables.

    :param session: database session (the new row is added to it)
    :param data: incoming request body, keyed by resource *type*
    :param result: resource dictionary to extend
    :param type: resource type name (used to build the FK column name)
    :param table: model class backing the extension attributes
    :param keys: attribute names to persist
    """
    values = {}
    for key in keys:
        raw = data[type][key]
        # unset attributes are stored as NULL
        values[key] = raw if attributes.is_attr_set(raw) else None
    id_column = type + '_' + 'id'
    values[id_column] = result['id']
    session.add(table(**values))
    # the FK column is internal; expose only the attribute values
    values.pop(id_column)
    result.update(values)
def _default_process_update(self, session, data, result, type=None,
                            table=None, keys=None):
    """Default update behavior for extension attributes.

    Loads the extension row for the resource, applies any set-and-changed
    values from *data*, and mirrors the current stored values (updated or
    not) into *result*.  Call once per extension table if needed.

    :param session: database session
    :param data: incoming request body, keyed by resource *type*
    :param result: resource dictionary to extend
    :param type: resource type name (used to build the FK column name)
    :param table: model class backing the extension attributes
    :param keys: attribute names to reconcile
    """
    id_filter = {type + '_' + 'id': result['id']}
    try:
        record = session.query(table).filter_by(**id_filter).one()
    except orm_exc.NoResultFound:
        # TODO(ivar) This is a preexisting object. For now just ignore
        # this and return. Each extension driver should be able to specify
        # a default behavior in case this happens.
        return
    for attr in keys:
        new_value = data[type].get(attr)
        if attributes.is_attr_set(new_value) and \
                new_value != getattr(record, attr):
            setattr(record, attr, new_value)
        result[attr] = getattr(record, attr)
def _default_extend_dict(self, session, result, type=None,
                         table=None, keys=None):
    """Default dictionary-extension behavior.

    Copies the stored extension attribute values for the resource into
    *result*, avoiding duplicated code across drivers.  Call once per
    extension table if a driver spans several tables.

    :param session: database session
    :param result: resource dictionary to extend
    :param type: resource type name (used to build the FK column name)
    :param table: model class backing the extension attributes
    :param keys: attribute names to copy into result
    """
    id_filter = {type + '_' + 'id': result['id']}
    try:
        record = session.query(table).filter_by(**id_filter).one()
    except orm_exc.NoResultFound:
        # TODO(ivar) This is a preexisting object. For now just ignore
        # this and return. Each extension driver should be able to specify
        # a default behavior in case this happens.
        return
    result.update((attr, getattr(record, attr)) for attr in keys)
def default_extension_behavior(table, keys=None):
    """Decorator factory giving extension-driver hooks default DB behavior.

    Wraps a process_create_*/process_update_*/extend_*_dict method so the
    matching _default_* helper runs against *table* first, after which the
    wrapped method only needs driver-specific postprocessing.

    :param table: model class backing the extension attributes
    :param keys: optional explicit attribute-name list; when None the
        names are derived from the driver's extension dictionary
    NOTE(review): defined without 'self' — intended for use as a
    decorator at class-definition time.
    """
    def wrap(func):
        def inner(inst, *args):
            def filter_keys(inst, data, type):
                # Resolve which attribute names to act on: the explicit
                # 'keys' closure if given, otherwise the names declared in
                # the driver's extension dictionary, optionally restricted
                # to the keys actually present in the incoming data.
                plural = utils.get_resource_plural(type)
                if keys:
                    return keys
                definition = inst._extension_dict[plural]
                return [x for x in definition if (x in data[type] if data else
                                                  True)]
            # Dispatch on the wrapped method's name; 'type' is the resource
            # name embedded in it.  (NOTE: 'type' shadows the builtin.)
            name = func.__name__
            if name.startswith('process_create_'):
                # call default process create
                type = name[len('process_create_'):]
                inst._default_process_create(*args, type=type, table=table,
                                             keys=filter_keys(inst, args[1], type))
                # Complete result dictionary with unfiltered attributes
                inst._default_extend_dict(args[0], args[2], type=type,
                                          table=table,
                                          keys=filter_keys(inst, None, type))
            elif name.startswith('process_update_'):
                # call default process update
                type = name[len('process_update_'):]
                inst._default_process_update(*args, type=type, table=table,
                                             keys=filter_keys(inst, args[1], type))
                # Complete result dictionary with unfiltered attributes
                inst._default_extend_dict(args[0], args[2], type=type,
                                          table=table,
                                          keys=filter_keys(inst, None, type))
            elif name.startswith('extend_') and name.endswith('_dict'):
                # call default extend dict
                type = name[len('extend_'):-len('_dict')]
                inst._default_extend_dict(*args, type=type, table=table,
                                          keys=filter_keys(inst, None, type))
            # Now exec the actual function for postprocessing
            func(inst, *args)
        return inner
return wrap | {
"content_hash": "ad7f3b29fde2ae067182da11c5a0f374",
"timestamp": "",
"source": "github",
"line_count": 1760,
"max_line_length": 80,
"avg_line_length": 34.967045454545456,
"alnum_prop": 0.6615644600435475,
"repo_name": "tbachman/group-based-policy",
"id": "1e9e9e1544404a512584e6a5c9ed995a4e72b629",
"size": "62115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gbpservice/neutron/services/grouppolicy/group_policy_driver_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2130911"
},
{
"name": "Shell",
"bytes": "28973"
}
],
"symlink_target": ""
} |
import os.path
import numpy as np
import argparse
from glob import glob
from scikits.audiolab import Sndfile
from scikits.audiolab import Format
from sklearn.mixture import GMM
from MFCC import melScaling
#######################################################################
# some settings
framelen = 1024  # analysis frame length in samples (also the FFT size)
fs = 44100.0  # expected input sample rate in Hz; files at other rates are rejected
verbose = True  # module-wide chattiness flag (cleared by the CLI's -q switch)
#######################################################################
# main class
class Smacpy:
    """Smacpy - simple-minded audio classifier in python. See the README file for more details.

    USAGE EXAMPLE:
    In this hypothetical example we train on four audio files, labelled as either 'usa' or 'uk', and then test on a separate audio file of someone called hubert:

    from smacpy import Smacpy
    model = Smacpy("wavs/training", {'karen01.wav':'usa', 'john01.wav':'uk', 'steve02.wav':'usa', 'joe03.wav':'uk'})
    model.classify('wavs/testing/hubert01.wav')

    Note for developers: this code should aim to be understandable, and not too long. Don't add too much functionality, or efficiency ;)
    """

    def __init__(self, wavfolder, trainingdata):
        """Initialise the classifier and train it on some WAV files.
        'wavfolder' is the base folder, to be prepended to all WAV paths.
        'trainingdata' is a dictionary of wavpath:label pairs."""
        # BUG FIX: framelen//2 (floor division) — under Python 3 the old
        # framelen/2 produced a float, which melScaling does not expect
        # and which breaks the spectrum slice in file_to_features.
        self.mfccMaker = melScaling(int(fs), framelen // 2, 40)
        self.mfccMaker.update()
        allfeatures = {wavpath: self.file_to_features(os.path.join(wavfolder, wavpath))
                       for wavpath in trainingdata}
        # Determine the normalisation stats, and remember them
        allconcat = np.vstack(list(allfeatures.values()))
        self.means = np.mean(allconcat, 0)
        self.invstds = np.std(allconcat, 0)
        for i, val in enumerate(self.invstds):
            if val == 0.0:
                # constant feature: avoid division by zero, leave it unscaled
                self.invstds[i] = 1.0
            else:
                self.invstds[i] = 1.0 / val
        # For each label, compile a normalised concatenated list of features
        aggfeatures = {}
        for wavpath, features in allfeatures.items():
            label = trainingdata[wavpath]
            normed = self.__normalise(features)
            if label not in aggfeatures:
                aggfeatures[label] = normed
            else:
                aggfeatures[label] = np.vstack((aggfeatures[label], normed))
        # For each label's aggregated features, train a GMM and remember it
        self.gmms = {}
        for label, aggf in aggfeatures.items():
            if verbose: print("    Training a GMM for label %s, using data of shape %s" % (label, str(np.shape(aggf))))
            self.gmms[label] = GMM(n_components=10)  # , cvtype='full')
            self.gmms[label].fit(aggf)
        if verbose: print("    Trained %i classes from %i input files" % (len(self.gmms), len(trainingdata)))

    def __normalise(self, data):
        "Normalises data using the mean and stdev of the training data - so that everything is on a common scale."
        return (data - self.means) * self.invstds

    def classify(self, wavpath):
        "Specify the path to an audio file, and this returns the max-likelihood class, as a string label."
        features = self.__normalise(self.file_to_features(wavpath))
        # For each label GMM, find the overall log-likelihood and choose the strongest
        bestlabel = ''
        bestll = -9e99
        for label, gmm in self.gmms.items():
            ll = gmm.eval(features)[0]
            ll = np.sum(ll)
            if ll > bestll:
                bestll = ll
                bestlabel = label
        return bestlabel

    def file_to_features(self, wavpath):
        "Reads through a mono WAV file, converting each frame to the required features. Returns a 2D array."
        if verbose: print("Reading %s" % wavpath)
        if not os.path.isfile(wavpath): raise ValueError("path %s not found" % wavpath)
        sf = Sndfile(wavpath, "r")
        #if (sf.channels != 1) and verbose: print(" Sound file has multiple channels (%i) - channels will be mixed to mono." % sf.channels)
        if sf.samplerate != fs: raise ValueError("wanted sample rate %g - got %g." % (fs, sf.samplerate))
        window = np.hamming(framelen)
        features = []
        while(True):
            try:
                chunk = sf.read_frames(framelen, dtype=np.float32)
                if len(chunk) != framelen:
                    print("Not read sufficient samples - returning")
                    break
                if sf.channels != 1:
                    chunk = np.mean(chunk, 1)  # mixdown
                framespectrum = np.fft.fft(window * chunk)
                # BUG FIX: framelen//2 — a float slice index is a TypeError
                # under Python 3; value unchanged under Python 2.
                magspec = abs(framespectrum[:framelen // 2])
                # do the frequency warping and MFCC computation
                melSpectrum = self.mfccMaker.warpSpectrum(magspec)
                melCepstrum = self.mfccMaker.getMFCCs(melSpectrum, cn=True)
                melCepstrum = melCepstrum[1:]   # exclude zeroth coefficient
                melCepstrum = melCepstrum[:13]  # limit to lower MFCCs
                framefeatures = melCepstrum  # todo: include deltas? that can be your homework.
                features.append(framefeatures)
            except RuntimeError:
                # Sndfile raises RuntimeError at end-of-file: stop reading
                break
        sf.close()
        return np.array(features)
#######################################################################
def trainAndTest(trainpath, trainwavs, testpath, testwavs):
    """Evaluation helper: train a Smacpy model, then classify known-class wavs.

    Returns a tuple (numcorrect, numtotal, numclasses).
    """
    print("TRAINING")
    model = Smacpy(trainpath, trainwavs)
    print("TESTING")
    ncorrect = 0
    for wavpath, label in testwavs.items():
        inferred = model.classify(os.path.join(testpath, wavpath))
        if verbose: print(" inferred: %s" % inferred)
        ncorrect += (inferred == label)
    return (ncorrect, len(testwavs), len(model.gmms))
#######################################################################
# If this file is invoked as a script, it carries out a simple runthrough
# of training on some wavs, then testing, with classnames being the start of the filenames
if __name__ == '__main__':
    # Handle the command-line arguments for where the train/test data comes from:
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--trainpath', default='wavs', help="Path to the WAV files used for training")
    parser.add_argument('-T', '--testpath', help="Path to the WAV files used for testing")
    parser.add_argument('-q', dest='quiet', action='store_true', help="Be less verbose, don't output much text during processing")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-c', '--charsplit', default='_', help="Character used to split filenames: anything BEFORE this character is the class")
    group.add_argument('-n', '--numchars', default=0, help="Instead of splitting using 'charsplit', use this fixed number of characters from the start of the filename", type=int)
    args = vars(parser.parse_args())
    verbose = not args['quiet']
    # FIX: compare against None with 'is', not '=='
    if args['testpath'] is None:
        args['testpath'] = args['trainpath']
    # Build up lists of the training and testing WAV files:
    wavsfound = {'trainpath': {}, 'testpath': {}}
    for onepath in ['trainpath', 'testpath']:
        pattern = os.path.join(args[onepath], '*.wav')
        for wavpath in glob(pattern):
            # Derive each file's class label from its filename: either a
            # fixed-length prefix (-n) or everything before the split char (-c).
            if args['numchars'] != 0:
                label = os.path.basename(wavpath)[:args['numchars']]
            else:
                label = os.path.basename(wavpath).split(args['charsplit'])[0]
            shortwavpath = os.path.relpath(wavpath, args[onepath])
            wavsfound[onepath][shortwavpath] = label
        if len(wavsfound[onepath]) == 0:
            raise RuntimeError("Found no files using this pattern: %s" % pattern)
        if verbose:
            print("Class-labels and filenames to be used from %s:" % onepath)
            for wavpath, label in sorted(wavsfound[onepath].items()):
                print(" %s: \t %s" % (label, wavpath))
    if args['testpath'] != args['trainpath']:
        # Separate train-and-test collections
        ncorrect, ntotal, nclasses = trainAndTest(args['trainpath'], wavsfound['trainpath'], args['testpath'], wavsfound['testpath'])
        print("Got %i correct out of %i (trained on %i classes)" % (ncorrect, ntotal, nclasses))
    else:
        # This runs "stratified leave-one-out crossvalidation": test multiple times by leaving one-of-each-class out and training on the rest.
        # First we need to build a list of files grouped by each classlabel
        # (sorted(set(...)) — the intermediate list() was redundant)
        labelsinuse = sorted(set(wavsfound['trainpath'].values()))
        grouped = {label: [] for label in labelsinuse}
        for wavpath, label in wavsfound['trainpath'].items():
            grouped[label].append(wavpath)
        numfolds = min(len(collection) for collection in grouped.values())
        # Each "fold" will be a collection of one item of each label
        folds = [{wavpaths[index]: label for label, wavpaths in grouped.items()} for index in range(numfolds)]
        totcorrect, tottotal = (0, 0)
        # Then we go through, each time training on all-but-one and testing on the one left out
        for index in range(numfolds):
            print("Fold %i of %i" % (index + 1, numfolds))
            chosenfold = folds[index]
            alltherest = {}
            for whichfold, otherfold in enumerate(folds):
                if whichfold != index:
                    alltherest.update(otherfold)
            ncorrect, ntotal, nclasses = trainAndTest(args['trainpath'], alltherest, args['trainpath'], chosenfold)
            totcorrect += ncorrect
            tottotal += ntotal
        print("Got %i correct out of %i (using stratified leave-one-out crossvalidation, %i folds)" % (totcorrect, tottotal, numfolds))
| {
"content_hash": "7e9cf901b324ec396be7d2b04a096646",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 182,
"avg_line_length": 43.73762376237624,
"alnum_prop": 0.6828522920203736,
"repo_name": "daleloogn/singerID-smacpy-GMM",
"id": "1ad7ea8fee7e55610dca1dc686c4e01aecf8edaa",
"size": "10016",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smacpy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14073"
}
],
"symlink_target": ""
} |
import datetime
import time
import json
import arcpy
import copy
import os
import tempfile
import uuid
from spatial import json_to_featureclass
from geometry import Point, MultiPoint, Polygon, Polyline, SpatialReference
from .._abstract.abstract import AbstractGeometry
#from ..agol import featureservice as agolFeatureService
#from ..agol import layer as agolLayer
def _unicode_convert(obj):
    """Recursively encode unicode strings inside a structure to UTF-8 str.

    Walks dicts (keys and values) and lists; any other type is returned
    unchanged.  NOTE: Python-2 only (uses ``unicode`` / ``iteritems``).
    """
    if isinstance(obj, dict):
        return dict((_unicode_convert(k), _unicode_convert(v))
                    for k, v in obj.iteritems())
    if isinstance(obj, list):
        return [_unicode_convert(item) for item in obj]
    if isinstance(obj, unicode):
        return obj.encode('utf-8')
    return obj
#----------------------------------------------------------------------
def _date_handler(obj):
if isinstance(obj, datetime.datetime):
return local_time_to_online(obj)
#elif isinstance(obj, (agolFeatureService.FeatureService,
#agolLayer.FeatureLayer,
#agolLayer.TableLayer)):
#return dict(obj)
else:
return obj
#----------------------------------------------------------------------
def local_time_to_online(dt=None):
    """
    Convert a local-time datetime to an AGOL/UTC timestamp in milliseconds.
    Inputs:
       dt - datetime object; defaults to the current local time
    Output:
       timestamp as a number (milliseconds since the UNIX epoch)
    """
    if dt is None:
        dt = datetime.datetime.now()
    # pick the correct UTC offset depending on whether DST is in effect
    dst_active = time.daylight and time.localtime().tm_isdst > 0
    offset_secs = time.altzone if dst_active else time.timezone
    return (time.mktime(dt.timetuple()) + offset_secs) * 1000
#----------------------------------------------------------------------
def online_time_to_string(value, timeFormat):
    """
    Format an AGOL millisecond timestamp as a local date/time string.
    Inputs:
       value - timestamp in milliseconds
       timeFormat - strftime-style output format
    Output:
       formatted string
    """
    seconds = value / 1000
    return datetime.datetime.fromtimestamp(seconds).strftime(timeFormat)
########################################################################
class Feature(object):
    """A single feature: attribute values plus optional esri-JSON geometry.

    Accepts either a dict or a JSON string and keeps three views in sync:
    the dict (``_dict``, source of truth), its JSON string (``_json``) and
    a lazily constructed arcpy geometry (``_geom``).
    NOTE(review): Python-2 only code (``dict.has_key`` / ``iteritems``).
    """
    _geom = None
    _json = None        # cached JSON string form of the feature
    _dict = None        # feature as a dictionary (source of truth)
    _geom = None        # NOTE(review): duplicate of the first _geom; harmless
    _geomType = None    # cached geometry type name ("Table" when no geometry)
    _attributes = None  # cached attribute-name -> value mapping
    _wkid = None        # spatial reference well-known id, if supplied
    #----------------------------------------------------------------------
    def __init__(self, json_string, wkid=None):
        """Constructor

        :param json_string: feature as an esri-JSON dict or JSON string
        :param wkid: optional spatial-reference id; when given it is
            injected into the feature's geometry
        :raises TypeError: for any other input type
        """
        self._wkid = wkid
        if type(json_string) is dict:
            if not wkid is None:
                json_string['geometry']['spatialReference'] = {"wkid" : wkid}
            self._json = json.dumps(json_string,
                                    default=_date_handler)
            self._dict = json_string
        elif type(json_string) is str:
            self._dict = json.loads(json_string)
            if not wkid is None:
                self._dict['geometry']['spatialReference'] = {"wkid" : wkid}
            self._json = json.dumps(self._dict,
                                    default=_date_handler)
        else:
            raise TypeError("Invalid Input, only dictionary or string allowed")
    #----------------------------------------------------------------------
    def set_value(self, field_name, value):
        """ sets an attribute value for a given field name

        Accepts a regular attribute field, or one of the special names
        'SHAPE'/'SHAPE@'/'GEOMETRY' to replace the geometry from either a
        package geometry object or an arcpy geometry.  Returns False when
        the geometry type is unsupported; None attribute values are
        silently ignored.
        """
        if field_name in self.fields:
            if not value is None:
                self._dict['attributes'][field_name] = _unicode_convert(value)
                self._json = json.dumps(self._dict, default=_date_handler)
            else:
                pass  # None attribute values are deliberately ignored
        elif field_name.upper() in ['SHAPE', 'SHAPE@', "GEOMETRY"]:
            if isinstance(value, AbstractGeometry):
                # store only the esri-JSON coordinate payload for the kind
                if isinstance(value, Point):
                    self._dict['geometry'] = {
                        "x" : value.asDictionary['x'],
                        "y" : value.asDictionary['y']
                    }
                elif isinstance(value, MultiPoint):
                    self._dict['geometry'] = {
                        "points" : value.asDictionary['points']
                    }
                elif isinstance(value, Polyline):
                    self._dict['geometry'] = {
                        "paths" : value.asDictionary['paths']
                    }
                elif isinstance(value, Polygon):
                    self._dict['geometry'] = {
                        "rings" : value.asDictionary['rings']
                    }
                else:
                    return False
                self._json = json.dumps(self._dict, default=_date_handler)
            elif isinstance(value, arcpy.Geometry):
                # arcpy geometries are converted to package geometries and
                # re-dispatched through set_value
                if isinstance(value, arcpy.PointGeometry):
                    self.set_value( field_name, Point(value,value.spatialReference.factoryCode))
                elif isinstance(value, arcpy.Multipoint):
                    self.set_value( field_name, MultiPoint(value,value.spatialReference.factoryCode))
                elif isinstance(value, arcpy.Polyline):
                    self.set_value( field_name, Polyline(value,value.spatialReference.factoryCode))
                elif isinstance(value, arcpy.Polygon):
                    self.set_value( field_name, Polygon(value,value.spatialReference.factoryCode))
        else:
            return False
        return True
    #----------------------------------------------------------------------
    def get_value(self, field_name):
        """ returns a value for a given field name

        'SHAPE'/'SHAPE@'/'GEOMETRY' return the raw geometry dict; unknown
        field names return None.
        """
        if field_name in self.fields:
            return self._dict['attributes'][field_name]
        elif field_name.upper() in ['SHAPE', 'SHAPE@', "GEOMETRY"]:
            return self._dict['geometry']
        return None
    #----------------------------------------------------------------------
    @property
    def asDictionary(self):
        """returns the feature as a dictionary

        NOTE(review): feat_dict is assembled but discarded — the method
        returns the raw internal dict instead; confirm intent before
        relying on feat_dict's normalization.
        """
        feat_dict = {}
        if self._geom is not None:
            if self._dict.has_key('feature'):
                feat_dict['geometry'] = self._dict['feature']['geometry']
            elif self._dict.has_key('geometry'):
                feat_dict['geometry'] = self._dict['geometry']
        if self._dict.has_key("feature"):
            feat_dict['attributes'] = self._dict['feature']['attributes']
        else:
            feat_dict['attributes'] = self._dict['attributes']
        return self._dict
    #----------------------------------------------------------------------
    @property
    def asRow(self):
        """ converts a feature to a list for insertion into an insert cursor
        Output:
           [row items], [field names]
           returns a list of fields and the row object
        """
        fields = self.fields  # also populates self._attributes
        row = [""] * len(fields)
        for k,v in self._attributes.iteritems():
            row[fields.index(k)] = v
            del v
            del k
        if self.geometry is not None:
            # append the geometry under the arcpy SHAPE@ token
            row.append(self.geometry)
            fields.append("SHAPE@")
        return row, fields
    #----------------------------------------------------------------------
    @property
    def geometry(self):
        """returns the feature geometry as an arcpy geometry (lazy)"""
        if not self._wkid is None:
            sr = arcpy.SpatialReference(self._wkid)
        else:
            sr = None  # NOTE(review): sr is computed but never used
        if self._geom is None:
            if self._dict.has_key('feature'):
                self._geom = arcpy.AsShape(self._dict['feature']['geometry'], esri_json=True)
            elif self._dict.has_key('geometry'):
                self._geom = arcpy.AsShape(self._dict['geometry'], esri_json=True)
        return self._geom
    #----------------------------------------------------------------------
    @property
    def fields(self):
        """ returns a list of feature fields (and refreshes _attributes) """
        if self._dict.has_key("feature"):
            self._attributes = self._dict['feature']['attributes']
        else:
            self._attributes = self._dict['attributes']
        return self._attributes.keys()
    #----------------------------------------------------------------------
    @property
    def geometryType(self):
        """ returns the feature's geometry type ("Table" when no geometry) """
        if self._geomType is None:
            if self.geometry is not None:
                self._geomType = self.geometry.type
            else:
                self._geomType = "Table"
        return self._geomType
    @staticmethod
    def fc_to_features(dataset):
        """
        converts a dataset to a list of feature objects
        Input:
           dataset - path to table or feature class
        Output:
           list of feature objects
        """
        desc = arcpy.Describe(dataset)
        fields = [field.name for field in arcpy.ListFields(dataset) if field.type not in ['Geometry']]
        date_fields = [field.name for field in arcpy.ListFields(dataset) if field.type =='Date']
        non_geom_fields = copy.deepcopy(fields)
        features = []
        if hasattr(desc, "shapeFieldName"):
            fields.append("SHAPE@JSON")
        del desc
        with arcpy.da.SearchCursor(dataset, fields) as rows:
            for row in rows:
                row = list(row)
                # date columns are converted to AGOL millisecond timestamps
                for df in date_fields:
                    if row[fields.index(df)] != None:
                        row[fields.index(df)] = int((_date_handler(row[fields.index(df)])))
                template = {
                    "attributes" : dict(zip(non_geom_fields, row))
                }
                if "SHAPE@JSON" in fields:
                    template['geometry'] = \
                        json.loads(row[fields.index("SHAPE@JSON")])
                features.append(
                    Feature(json_string=_unicode_convert(template))
                )
                del row
        return features
    #----------------------------------------------------------------------
    def __str__(self):
        """JSON string representation of the feature"""
        return json.dumps(self.asDictionary)
########################################################################
class MosaicRuleObject(object):
    """
    The image service uses a mosaic rule to mosaick multiple rasters on the
    fly. The mosaic rule parameter is used by many image service operations,
    for example, export image and identify operations.

    Construct with the desired mosaic method and read the ``value``
    property for the dictionary to send to the REST endpoint.
    """
    # mosaic methods accepted by the image service REST API
    __allowedMosaicMethods = [
        "esriMosaicNone",
        "esriMosaicCenter",
        "esriMosaicNadir",
        "esriMosaicViewpoint",
        "esriMosaicAttribute",
        "esriMosaicLockRaster",
        "esriMosaicNorthwest",
        "esriMosaicSeamline"
    ]
    # mosaic operators accepted by the image service REST API
    __allowedMosaicOps = [
        "MT_FIRST",
        "MT_LAST",
        "MT_MIN",
        "MT_MAX",
        "MT_MEAN",
        "MT_BLEND",
        "MT_SUM"
    ]
    _mosaicMethod = None
    _where = None
    _sortField = None
    _sortValue = None
    _ascending = None
    _lockRasterIds = None
    _viewpoint = None
    _fids = None
    _mosaicOperation = None
    _itemRenderingRule = None
    #----------------------------------------------------------------------
    def __init__(self,
                 mosaicMethod,
                 where="",
                 sortField="",
                 sortValue="",
                 ascending=True,
                 lockRasterIds=None,
                 viewpoint=None,
                 fids=None,
                 mosaicOperation=None,
                 itemRenderingRule=""):
        """Constructor

        :param mosaicMethod: one of the esriMosaic* method names
        :param where: where clause restricting which rasters are mosaicked
        :param sortField: sort field for esriMosaicAttribute
        :param sortValue: sort base value for esriMosaicAttribute
        :param ascending: sort direction flag
        :param lockRasterIds: raster ids for esriMosaicLockRaster
            (default None becomes []; the old mutable [] default was shared)
        :param viewpoint: Point used by esriMosaicViewpoint
        :param fids: object ids participating in the mosaic (None becomes [])
        :param mosaicOperation: one of the MT_* operators, or None
        :param itemRenderingRule: rendering rule applied per mosaic item
        :raises AttributeError: when mosaicMethod is not recognized
        """
        if mosaicMethod in self.__allowedMosaicMethods:
            self._mosaicMethod = mosaicMethod
        else:
            raise AttributeError("Invalid mosaic method.")
        self._where = where
        self._sortField = sortField
        self._sortValue = sortValue
        self._ascending = ascending
        # BUG FIX: previously stored as self._localRasterIds, so the
        # lockRasterIds property and the esriMosaicLockRaster rule never
        # saw the constructor value.
        self._lockRasterIds = [] if lockRasterIds is None else lockRasterIds
        self._itemRenderingRule = itemRenderingRule
        # isinstance only evaluated for non-None viewpoints; behaviorally
        # identical since isinstance(None, Point) is False.
        if viewpoint is not None and isinstance(viewpoint, Point):
            self._viewpoint = viewpoint
        self._fids = [] if fids is None else fids
        if mosaicOperation is not None and \
           mosaicOperation in self.__allowedMosaicOps:
            self._mosaicOperation = mosaicOperation
    #----------------------------------------------------------------------
    @property
    def where(self):
        """
        Use where clause to define a subset of rasters used in the mosaic,
        be aware that the rasters may not be visible at all scales
        """
        return self._where
    #----------------------------------------------------------------------
    @where.setter
    def where(self, value):
        """
        Use where clause to define a subset of rasters used in the mosaic,
        be aware that the rasters may not be visible at all scales
        """
        if value != self._where:
            self._where = value
    #----------------------------------------------------------------------
    @property
    def mosaicMethod(self):
        """
        get/set the mosaic method (validated against the allowed list)
        """
        return self._mosaicMethod
    #----------------------------------------------------------------------
    @mosaicMethod.setter
    def mosaicMethod(self, value):
        """
        get/set the mosaic method (validated against the allowed list)
        """
        if value in self.__allowedMosaicMethods and \
           self._mosaicMethod != value:
            self._mosaicMethod = value
    #----------------------------------------------------------------------
    @property
    def sortField(self):
        """sort field used by esriMosaicAttribute"""
        return self._sortField
    #----------------------------------------------------------------------
    @sortField.setter
    def sortField(self, value):
        """sort field used by esriMosaicAttribute"""
        if self._sortField != value:
            self._sortField = value
    #----------------------------------------------------------------------
    @property
    def sortValue(self):
        """sort base value used by esriMosaicAttribute"""
        return self._sortValue
    #----------------------------------------------------------------------
    @sortValue.setter
    def sortValue(self, value):
        """sort base value used by esriMosaicAttribute"""
        if self._sortValue != value:
            self._sortValue = value
    #----------------------------------------------------------------------
    @property
    def ascending(self):
        """sort direction flag"""
        return self._ascending
    #----------------------------------------------------------------------
    @ascending.setter
    def ascending(self, value):
        """sort direction flag (only bools are accepted)"""
        if isinstance(value, bool):
            self._ascending = value
    #----------------------------------------------------------------------
    @property
    def lockRasterIds(self):
        """raster ids used by esriMosaicLockRaster"""
        return self._lockRasterIds
    #----------------------------------------------------------------------
    @lockRasterIds.setter
    def lockRasterIds(self, value):
        """raster ids used by esriMosaicLockRaster

        BUG FIX: the old setter tested isinstance on the stored value
        instead of the incoming one, so a valid list could be rejected.
        """
        if isinstance(value, list):
            self._lockRasterIds = value
    #----------------------------------------------------------------------
    @property
    def viewpoint(self):
        """Point used by esriMosaicViewpoint"""
        return self._viewpoint
    #----------------------------------------------------------------------
    @viewpoint.setter
    def viewpoint(self, value):
        """Point used by esriMosaicViewpoint (only Point is accepted)"""
        if isinstance(value, Point):
            self._viewpoint = value
    #----------------------------------------------------------------------
    @property
    def fids(self):
        """object ids participating in the mosaic"""
        return self._fids
    #----------------------------------------------------------------------
    @fids.setter
    def fids(self, value):
        """object ids participating in the mosaic"""
        self._fids = value
    #----------------------------------------------------------------------
    @property
    def mosaicOperation(self):
        """mosaic operator (one of the MT_* names)"""
        return self._mosaicOperation
    #----------------------------------------------------------------------
    @mosaicOperation.setter
    def mosaicOperation(self, value):
        """mosaic operator (validated against the allowed list)"""
        if value in self.__allowedMosaicOps and \
           self._mosaicOperation != value:
            self._mosaicOperation = value
    #----------------------------------------------------------------------
    @property
    def itemRenderingRule(self):
        """rendering rule applied per mosaic item"""
        return self._itemRenderingRule
    #----------------------------------------------------------------------
    @itemRenderingRule.setter
    def itemRenderingRule(self, value):
        """rendering rule applied per mosaic item"""
        if self._itemRenderingRule != value:
            self._itemRenderingRule = value
    #----------------------------------------------------------------------
    @property
    def value(self):
        """
        gets the mosaic rule object as a dictionary
        :raises AttributeError: if the stored method is unrecognized
        """
        if self._mosaicMethod in ("esriMosaicNone", "esriMosaicCenter",
                                  "esriMosaicNorthwest", "esriMosaicNadir"):
            return {
                # BUG FIX: previously hard-coded "esriMosaicNone", which
                # misreported the Center/Northwest/Nadir methods.
                "mosaicMethod" : self._mosaicMethod,
                "where" : self._where,
                "ascending" : self._ascending,
                "fids" : self._fids,
                "mosaicOperation" : self._mosaicOperation
            }
        elif self._mosaicMethod == "esriMosaicViewpoint":
            return {
                "mosaicMethod" : "esriMosaicViewpoint",
                "viewpoint" : self._viewpoint.asDictionary,
                "where" : self._where,
                "ascending" : self._ascending,
                "fids" : self._fids,
                "mosaicOperation" : self._mosaicOperation
            }
        elif self._mosaicMethod == "esriMosaicAttribute":
            return {
                "mosaicMethod" : "esriMosaicAttribute",
                "sortField" : self._sortField,
                "sortValue" : self._sortValue,
                "ascending" : self._ascending,
                "where" : self._where,
                "fids" : self._fids,
                "mosaicOperation" : self._mosaicOperation
            }
        elif self._mosaicMethod == "esriMosaicLockRaster":
            return {
                "mosaicMethod" : "esriMosaicLockRaster",
                # BUG FIX: read from _lockRasterIds (was _localRasterIds)
                "lockRasterIds" : self._lockRasterIds,
                "where" : self._where,
                "ascending" : self._ascending,
                "fids" : self._fids,
                "mosaicOperation" : self._mosaicOperation
            }
        elif self._mosaicMethod == "esriMosaicSeamline":
            return {
                "mosaicMethod" : "esriMosaicSeamline",
                "where" : self._where,
                "fids" : self._fids,
                "mosaicOperation" : self._mosaicOperation
            }
        else:
            raise AttributeError("Invalid Mosaic Method")
########################################################################
class FeatureSet(object):
"""
This featureSet contains Feature objects, including the values for the
fields requested by the user. For layers, if you request geometry
information, the geometry of each feature is also returned in the
featureSet. For tables, the featureSet does not include geometries.
If a spatialReference is not specified at the featureSet level, the
featureSet will assume the spatialReference of its first feature. If
the spatialReference of the first feature is also not specified, the
spatial reference will be UnknownCoordinateSystem.
"""
_fields = None
_features = None
_hasZ = None
_hasM = None
_geometryType = None
_spatialReference = None
_objectIdFieldName = None
_globalIdFieldName = None
_displayFieldName = None
_allowedGeomTypes = ["esriGeometryPoint","esriGeometryMultipoint","esriGeometryPolyline",
"esriGeometryPolygon","esriGeometryEnvelope"]
#----------------------------------------------------------------------
def __init__(self,
fields,
features,
hasZ=False,
hasM=False,
geometryType=None,
spatialReference=None,
displayFieldName=None,
objectIdFieldName=None,
globalIdFieldName=None):
"""Constructor"""
self._fields = fields
self._features = features
self._hasZ = hasZ
self._hasM = hasM
self._geometryType = geometryType
self._spatialReference = spatialReference
self._displayFieldName = displayFieldName
self._objectIdFieldName = objectIdFieldName
self._globalIdFieldName = globalIdFieldName
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
return json.dumps(self.value)
#----------------------------------------------------------------------
@property
def value(self):
"""returns object as dictionary"""
return {
"objectIdFieldName" : self._objectIdFieldName,
"displayFieldName" : self._displayFieldName,
"globalIdFieldName" : self._globalIdFieldName,
"geometryType" : self._geometryType,
"spatialReference" : self._spatialReference,
"hasZ" : self._hasZ,
"hasM" : self._hasM,
"fields" : self._fields,
"features" : [f.asDictionary for f in self._features]
}
#----------------------------------------------------------------------
@property
def toJSON(self):
"""converts the object to JSON"""
return json.dumps(self.value)
#----------------------------------------------------------------------
def __iter__(self):
"""featureset iterator on features in feature set"""
for feature in self._features:
yield feature
#----------------------------------------------------------------------
    def __len__(self):
        """returns the length of features in feature set"""
        # Number of Feature objects currently held.
        return len(self._features)
#----------------------------------------------------------------------
@staticmethod
def fromJSON(jsonValue):
"""returns a featureset from a JSON string"""
jd = json.loads(jsonValue)
features = []
if jd.has_key('fields'):
fields = jd['fields']
else:
fields = {'fields':[]}
for feat in jd['features']:
features.append(Feature(feat, jd['spatialReference']['latestWkid']))
return FeatureSet(fields,
features,
hasZ=jd['hasZ'] if 'hasZ' in jd else False,
hasM=jd['hasM'] if 'hasM' in jd else False,
geometryType=jd['geometryType'] if 'geometryType' in jd else None,
objectIdFieldName=jd['objectIdFieldName'] if 'objectIdFieldName' in jd else None,
globalIdFieldName=jd['globalIdFieldName'] if 'globalIdFieldName' in jd else None,
displayFieldName=jd['displayFieldName'] if 'displayFieldName' in jd else None,
spatialReference=jd['spatialReference'] if 'spatialReference' in jd else None)
#----------------------------------------------------------------------
    @property
    def fields(self):
        """gets the featureset's fields (list of field definition dicts)"""
        return self._fields
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""gets the featureset's spatial reference"""
return self._spatialReference
#----------------------------------------------------------------------
@spatialReference.setter
def spatialReference(self, value):
"""sets the featureset's spatial reference"""
if isinstance(value, SpatialReference):
self._spatialReference = value
elif isinstance(value, int):
self._spatialReference = SpatialReference(wkid=value)
elif isinstance(value, str) and \
str(value).isdigit():
self._spatialReference = SpatialReference(wkid=int(value))
#----------------------------------------------------------------------
@property
def hasZ(self):
"""gets/sets the Z-property"""
return self._hasZ
#----------------------------------------------------------------------
@hasZ.setter
def hasZ(self, value):
"""gets/sets the Z-property"""
if isinstance(value, bool):
self._hasZ = value
#----------------------------------------------------------------------
@property
def hasM(self):
"""gets/set the M-property"""
return self._hasM
#----------------------------------------------------------------------
@hasM.setter
def hasM(self, value):
"""gets/set the M-property"""
if isinstance(value, bool):
self._hasM = value
#----------------------------------------------------------------------
@property
def geometryType(self):
"""gets/sets the geometry Type"""
return self._geometryType
#----------------------------------------------------------------------
@geometryType.setter
def geometryType(self, value):
"""gets/sets the geometry Type"""
if value in self._allowedGeomTypes:
self._geometryType = value
#----------------------------------------------------------------------
    @property
    def objectIdFieldName(self):
        """gets/sets the object id field"""
        return self._objectIdFieldName
    #----------------------------------------------------------------------
    @objectIdFieldName.setter
    def objectIdFieldName(self, value):
        """gets/sets the object id field"""
        # No validation is performed; any value is stored as-is.
        self._objectIdFieldName = value
#----------------------------------------------------------------------
    @property
    def globalIdFieldName(self):
        """gets/sets the globalIdFieldName"""
        return self._globalIdFieldName
    #----------------------------------------------------------------------
    @globalIdFieldName.setter
    def globalIdFieldName(self, value):
        """gets/sets the globalIdFieldName"""
        # No validation is performed; any value is stored as-is.
        self._globalIdFieldName = value
#----------------------------------------------------------------------
    @property
    def displayFieldName(self):
        """gets/sets the displayFieldName"""
        return self._displayFieldName
    #----------------------------------------------------------------------
    @displayFieldName.setter
    def displayFieldName(self, value):
        """gets/sets the displayFieldName"""
        # No validation is performed; any value is stored as-is.
        self._displayFieldName = value
#----------------------------------------------------------------------
def save(self, saveLocation, outName):
"""
Saves a featureset object to a feature class
Input:
saveLocation - output location of the data
outName - name of the table the data will be saved to
"""
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, "%s.json" % uuid.uuid4().get_hex())
with open(tempFile, 'wb') as writer:
writer.write(str(self))
writer.flush()
writer.close()
del writer
res = json_to_featureclass(json_file=tempFile,
out_fc=os.path.join(saveLocation, outName))
os.remove(tempFile)
return res
#----------------------------------------------------------------------
    @property
    def features(self):
        """gets the features in the FeatureSet (list of Feature objects)"""
        return self._features
| {
"content_hash": "fcd205c999a9be9d29f01595a4ccf718",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 107,
"avg_line_length": 39.58369098712446,
"alnum_prop": 0.4661534569373667,
"repo_name": "achapkowski/ArcREST",
"id": "b308b16ab4979c22728cbb43cea011f6109ce927",
"size": "27669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/arcrest/common/general.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1234325"
}
],
"symlink_target": ""
} |
"""Factories to help in tests."""
import factory
from factory.base import Factory
from faker import Faker
from tests.models import Category, UberCategory, Widget
fake = Faker()
class WidgetFactory(Factory):
    """Factory producing Widget instances with a fake one-word name."""
    class Meta:
        """Factory configuration."""
        model = Widget
    name = factory.Faker("word")
class CategoryFactory(Factory):
    """Category factory (docstring fixed: previously mislabeled 'User factory')."""
    class Meta:
        """Factory configuration."""
        model = Category
    name = factory.Faker("word")
    summary = factory.Faker("paragraph")
    icon = factory.Faker("image_url")
    image = factory.Faker("image_url")
class UberCategoryFactory(Factory):
    """UberCategory factory (docstring fixed: previously mislabeled 'User factory')."""
    class Meta:
        """Factory configuration."""
        model = UberCategory
    name = factory.Faker("word")
| {
"content_hash": "dc151cbcbe46b5ddf80634892df64235",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 55,
"avg_line_length": 20.871794871794872,
"alnum_prop": 0.6412776412776413,
"repo_name": "xuru/flywheel-reference-field",
"id": "0a78eb9ff6ecdb916b9f8eaa7ee38a2eb0eaa910",
"size": "838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25071"
},
{
"name": "Shell",
"bytes": "627"
}
],
"symlink_target": ""
} |
import json
import os
from reviewInfo.models import ReviewInfo
# Path of the repository root (four levels above this script) and the
# JSON export that will be imported into the ReviewInfo model.
REPO_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
FILE_NAME = os.path.join(REPO_DIR, 'data.json')
# Close the file as soon as it is parsed instead of holding the handle
# open through all of the database writes.
with open(FILE_NAME) as f:
    records = json.load(f)
total = len(records)
# enumerate() replaces the original manual float counter; naming the
# field directly also avoids shadowing the stdlib module name `time`.
for index, record in enumerate(records, 1):
    # Progress as a fraction between 0 and 1, matching the original output.
    print(index / float(total))
    r = ReviewInfo(sku=record['sku'],
                   rating=record['rating'],
                   title=record['title'],
                   comment=record['comment'],
                   submissionTime=record['submissionTime'],
                   reviewer=record['reviewer'][0]['name'])
    r.save()
| {
"content_hash": "f6d160aaa697982b29c5848481fdcd63",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 105,
"avg_line_length": 26.607142857142858,
"alnum_prop": 0.6174496644295302,
"repo_name": "riffschelder/reviewshub",
"id": "43825636b9c5dadb161388162ef95b462e0d0006",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewsHub/reviewInfo/scripts/import_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19914"
}
],
"symlink_target": ""
} |
"""
Miscellaneous data helpers, including functions for converting integers to and
from bytes and UTC timezone. Exports the following items:
- OrderedDict()
- int_from_bytes()
- int_to_bytes()
- timezone.utc
- inet_ntop()
- inet_pton()
- uri_to_iri()
- iri_to_uri()
"""
from __future__ import unicode_literals, division, absolute_import, print_function
import math
import sys
from datetime import datetime, date, time
from ._errors import unwrap
from ._iri import iri_to_uri, uri_to_iri # noqa
from ._ordereddict import OrderedDict # noqa
from ._types import type_name
if sys.platform == 'win32':
    # Windows lacks socket.inet_ntop/inet_pton; use the pure-Python
    # fallbacks shipped with this package instead.
    from ._inet import inet_ntop, inet_pton
else:
    from socket import inet_ntop, inet_pton  # noqa
# Python 2
if sys.version_info <= (3,):
    from datetime import timedelta, tzinfo
    # Flag used throughout the module to select Python-2-specific
    # behaviour (byte-string results from strftime() etc.).
    py2 = True
    def int_to_bytes(value, signed=False, width=None):
        """
        Converts an integer to a byte string
        :param value:
            The integer to convert
        :param signed:
            If the byte string should be encoded using two's complement
        :param width:
            None == auto, otherwise an integer of the byte width for the return
            value
        :return:
            A byte string
        """
        # Handle negatives in two's complement
        is_neg = False
        if signed and value < 0:
            is_neg = True
            # Smallest multiple of 8 bits that holds the magnitude's hex form.
            bits = int(math.ceil(len('%x' % abs(value)) / 2.0) * 8)
            value = (value + (1 << bits)) % (1 << bits)
        hex_str = '%x' % value
        if len(hex_str) & 1:
            # '%x' omits a leading zero; hex decoding needs an even length.
            hex_str = '0' + hex_str
        output = hex_str.decode('hex')  # Python-2-only str-to-bytes codec
        if signed and not is_neg and ord(output[0:1]) & 0x80:
            # A set high bit would read back as negative; prepend a zero byte.
            output = b'\x00' + output
        if width is not None:
            if is_neg:
                pad_char = b'\xFF'
            else:
                pad_char = b'\x00'
            output = (pad_char * (width - len(output))) + output
        elif is_neg and ord(output[0:1]) & 0x80 == 0:
            # Negative values must keep the sign bit set.
            output = b'\xFF' + output
        return output
    def int_from_bytes(value, signed=False):
        """
        Converts a byte string to an integer
        :param value:
            The byte string to convert
        :param signed:
            If the byte string should be interpreted using two's complement
        :return:
            An integer
        """
        if value == b'':
            return 0
        num = long(value.encode("hex"), 16)  # noqa
        if not signed:
            return num
        # Check for sign bit and handle two's complement
        if ord(value[0:1]) & 0x80:
            bit_len = len(value) * 8
            return num - (1 << bit_len)
        return num
    class utc(tzinfo):  # noqa
        # Minimal UTC tzinfo for Python 2, mirroring Python 3's timezone.utc.
        def tzname(self, _):
            return b'UTC+00:00'
        def utcoffset(self, _):
            return timedelta(0)
        def dst(self, _):
            return timedelta(0)
    class timezone():  # noqa
        # Namespace shim so callers can write timezone.utc on Python 2.
        utc = utc()
# Python 3
else:
    from datetime import timezone  # noqa
    py2 = False
    def int_to_bytes(value, signed=False, width=None):
        """
        Converts an integer to a byte string
        :param value:
            The integer to convert
        :param signed:
            If the byte string should be encoded using two's complement
        :param width:
            None == auto, otherwise an integer of the byte width for the return
            value
        :return:
            A byte string
        """
        if width is None:
            if signed:
                if value < 0:
                    bits_required = abs(value + 1).bit_length()
                else:
                    bits_required = value.bit_length()
                if bits_required % 8 == 0:
                    # Exactly at a byte boundary: reserve room for the sign bit.
                    bits_required += 1
            else:
                bits_required = value.bit_length()
            width = math.ceil(bits_required / 8) or 1
        return value.to_bytes(width, byteorder='big', signed=signed)
    def int_from_bytes(value, signed=False):
        """
        Converts a byte string to an integer
        :param value:
            The byte string to convert
        :param signed:
            If the byte string should be interpreted using two's complement
        :return:
            An integer
        """
        return int.from_bytes(value, 'big', signed=signed)
_DAYS_PER_MONTH_YEAR_0 = {
    1: 31,
    2: 29,  # Year 0 was a leap year
    3: 31,
    4: 30,
    5: 31,
    6: 30,
    7: 31,
    8: 31,
    9: 30,
    10: 31,
    11: 30,
    12: 31
}
class extended_date(object):
    """
    A datetime.date-like object that can represent the year 0. This is just
    to handle 0000-01-01 found in some certificates.
    """
    year = None
    month = None
    day = None
    def __init__(self, year, month, day):
        """
        :param year:
            The integer 0
        :param month:
            An integer from 1 to 12
        :param day:
            An integer from 1 to 31
        """
        if year != 0:
            raise ValueError('year must be 0')
        if month < 1 or month > 12:
            raise ValueError('month is out of range')
        # BUGFIX: the original tested `day < 0`, which accepted the invalid
        # day 0 even though days are documented as starting at 1.
        if day < 1 or day > _DAYS_PER_MONTH_YEAR_0[month]:
            raise ValueError('day is out of range')
        self.year = year
        self.month = month
        self.day = day
    def _format(self, format):
        """
        Performs strftime(), always returning a unicode string
        :param format:
            A strftime() format string
        :return:
            A unicode string of the formatted date
        """
        format = format.replace('%Y', '0000')
        # Year 0 is 1BC and a leap year. Leap years repeat themselves
        # every 28 years. Because of adjustments and the proleptic gregorian
        # calendar, the simplest way to format is to substitute year 2000.
        temp = date(2000, self.month, self.day)
        if '%c' in format:
            c_out = temp.strftime('%c')
            # Handle full years
            c_out = c_out.replace('2000', '0000')
            c_out = c_out.replace('%', '%%')
            format = format.replace('%c', c_out)
        if '%x' in format:
            x_out = temp.strftime('%x')
            # Handle formats such as 08/16/2000 or 16.08.2000
            x_out = x_out.replace('2000', '0000')
            x_out = x_out.replace('%', '%%')
            format = format.replace('%x', x_out)
        return temp.strftime(format)
    def isoformat(self):
        """
        Formats the date as %Y-%m-%d
        :return:
            The date formatted to %Y-%m-%d as a unicode string in Python 3
            and a byte string in Python 2
        """
        return self.strftime('0000-%m-%d')
    def strftime(self, format):
        """
        Formats the date using strftime()
        :param format:
            The strftime() format string
        :return:
            The formatted date as a unicode string in Python 3 and a byte
            string in Python 2
        """
        output = self._format(format)
        if py2:
            return output.encode('utf-8')
        return output
    def replace(self, year=None, month=None, day=None):
        """
        Returns a new datetime.date or asn1crypto.util.extended_date
        object with the specified components replaced
        :return:
            A datetime.date or asn1crypto.util.extended_date object
        """
        if year is None:
            year = self.year
        if month is None:
            month = self.month
        if day is None:
            day = self.day
        # A non-zero year is representable by the real date type.
        if year > 0:
            cls = date
        else:
            cls = extended_date
        return cls(
            year,
            month,
            day
        )
    def __str__(self):
        # Byte string on Python 2, unicode string on Python 3.
        if py2:
            return self.__bytes__()
        else:
            return self.__unicode__()
    def __bytes__(self):
        return self.__unicode__().encode('utf-8')
    def __unicode__(self):
        return self._format('%Y-%m-%d')
    def __eq__(self, other):
        # Only another extended_date can compare equal; a real date never
        # has year 0.
        if not isinstance(other, self.__class__):
            return False
        return self.__cmp__(other) == 0
    def __ne__(self, other):
        return not self.__eq__(other)
    def _comparison_error(self, other):
        raise TypeError(unwrap(
            '''
            An asn1crypto.util.extended_date object can only be compared to
            an asn1crypto.util.extended_date or datetime.date object, not %s
            ''',
            type_name(other)
        ))
    def __cmp__(self, other):
        # Year 0 predates every datetime.date (whose years start at 1).
        if isinstance(other, date):
            return -1
        if not isinstance(other, self.__class__):
            self._comparison_error(other)
        st = (
            self.year,
            self.month,
            self.day
        )
        ot = (
            other.year,
            other.month,
            other.day
        )
        if st < ot:
            return -1
        if st > ot:
            return 1
        return 0
    def __lt__(self, other):
        return self.__cmp__(other) < 0
    def __le__(self, other):
        return self.__cmp__(other) <= 0
    def __gt__(self, other):
        return self.__cmp__(other) > 0
    def __ge__(self, other):
        return self.__cmp__(other) >= 0
class extended_datetime(object):
    """
    A datetime.datetime-like object that can represent the year 0. This is just
    to handle 0000-01-01 found in some certificates.
    """
    year = None
    month = None
    day = None
    hour = None
    minute = None
    second = None
    microsecond = None
    tzinfo = None
    def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
        """
        :param year:
            The integer 0
        :param month:
            An integer from 1 to 12
        :param day:
            An integer from 1 to 31
        :param hour:
            An integer from 0 to 23
        :param minute:
            An integer from 0 to 59
        :param second:
            An integer from 0 to 59
        :param microsecond:
            An integer from 0 to 999999
        """
        if year != 0:
            raise ValueError('year must be 0')
        if month < 1 or month > 12:
            raise ValueError('month is out of range')
        # BUGFIX: the original tested `day < 0`, which accepted the invalid
        # day 0 even though days are documented as starting at 1.
        if day < 1 or day > _DAYS_PER_MONTH_YEAR_0[month]:
            raise ValueError('day is out of range')
        if hour < 0 or hour > 23:
            raise ValueError('hour is out of range')
        if minute < 0 or minute > 59:
            raise ValueError('minute is out of range')
        if second < 0 or second > 59:
            raise ValueError('second is out of range')
        if microsecond < 0 or microsecond > 999999:
            raise ValueError('microsecond is out of range')
        self.year = year
        self.month = month
        self.day = day
        self.hour = hour
        self.minute = minute
        self.second = second
        self.microsecond = microsecond
        self.tzinfo = tzinfo
    def date(self):
        """
        :return:
            An asn1crypto.util.extended_date of the date
        """
        return extended_date(self.year, self.month, self.day)
    def time(self):
        """
        :return:
            A datetime.time object of the time
        """
        return time(self.hour, self.minute, self.second, self.microsecond, self.tzinfo)
    def utcoffset(self):
        """
        :return:
            None or a datetime.timedelta() of the offset from UTC
        """
        if self.tzinfo is None:
            return None
        # tzinfo implementations expect a real datetime; year 2000 shares
        # year 0's calendar (see _format).
        return self.tzinfo.utcoffset(self.replace(year=2000))
    def dst(self):
        """
        :return:
            None or a datetime.timedelta() of the daylight savings time offset
        """
        if self.tzinfo is None:
            return None
        return self.tzinfo.dst(self.replace(year=2000))
    def tzname(self):
        """
        :return:
            None or the name of the timezone as a unicode string in Python 3
            and a byte string in Python 2
        """
        if self.tzinfo is None:
            return None
        return self.tzinfo.tzname(self.replace(year=2000))
    def _format(self, format):
        """
        Performs strftime(), always returning a unicode string
        :param format:
            A strftime() format string
        :return:
            A unicode string of the formatted datetime
        """
        format = format.replace('%Y', '0000')
        # Year 0 is 1BC and a leap year. Leap years repeat themselves
        # every 28 years. Because of adjustments and the proleptic gregorian
        # calendar, the simplest way to format is to substitute year 2000.
        temp = datetime(
            2000,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
            self.microsecond,
            self.tzinfo
        )
        if '%c' in format:
            c_out = temp.strftime('%c')
            # Handle full years
            c_out = c_out.replace('2000', '0000')
            c_out = c_out.replace('%', '%%')
            format = format.replace('%c', c_out)
        if '%x' in format:
            x_out = temp.strftime('%x')
            # Handle formats such as 08/16/2000 or 16.08.2000
            x_out = x_out.replace('2000', '0000')
            x_out = x_out.replace('%', '%%')
            format = format.replace('%x', x_out)
        return temp.strftime(format)
    def isoformat(self, sep='T'):
        """
        Formats the date as "%Y-%m-%d %H:%M:%S" with the sep param between the
        date and time portions
        :param sep:
            A single character of the separator to place between the date and
            time
        :return:
            The formatted datetime as a unicode string in Python 3 and a byte
            string in Python 2
        """
        if self.microsecond == 0:
            return self.strftime('0000-%%m-%%d%s%%H:%%M:%%S' % sep)
        return self.strftime('0000-%%m-%%d%s%%H:%%M:%%S.%%f' % sep)
    def strftime(self, format):
        """
        Formats the date using strftime()
        :param format:
            The strftime() format string
        :return:
            The formatted date as a unicode string in Python 3 and a byte
            string in Python 2
        """
        output = self._format(format)
        if py2:
            return output.encode('utf-8')
        return output
    def replace(self, year=None, month=None, day=None, hour=None, minute=None,
                second=None, microsecond=None, tzinfo=None):
        """
        Returns a new datetime.datetime or asn1crypto.util.extended_datetime
        object with the specified components replaced
        :return:
            A datetime.datetime or asn1crypto.util.extended_datetime object
        """
        if year is None:
            year = self.year
        if month is None:
            month = self.month
        if day is None:
            day = self.day
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is None:
            tzinfo = self.tzinfo
        # A non-zero year is representable by the real datetime type.
        if year > 0:
            cls = datetime
        else:
            cls = extended_datetime
        return cls(
            year,
            month,
            day,
            hour,
            minute,
            second,
            microsecond,
            tzinfo
        )
    def __str__(self):
        # Byte string on Python 2, unicode string on Python 3.
        if py2:
            return self.__bytes__()
        else:
            return self.__unicode__()
    def __bytes__(self):
        return self.__unicode__().encode('utf-8')
    def __unicode__(self):
        format = '%Y-%m-%d %H:%M:%S'
        if self.microsecond != 0:
            format += '.%f'
        return self._format(format)
    def __eq__(self, other):
        # Only another extended_datetime can compare equal; a real datetime
        # never has year 0.
        if not isinstance(other, self.__class__):
            return False
        return self.__cmp__(other) == 0
    def __ne__(self, other):
        return not self.__eq__(other)
    def _comparison_error(self, other):
        """
        Raises a TypeError about the other object not being suitable for
        comparison
        :param other:
            The object being compared to
        """
        raise TypeError(unwrap(
            '''
            An asn1crypto.util.extended_datetime object can only be compared to
            an asn1crypto.util.extended_datetime or datetime.datetime object,
            not %s
            ''',
            type_name(other)
        ))
    def __cmp__(self, other):
        # NOTE(review): utcoffset() is read from `other` before its type is
        # checked, so a non-datetime without that attribute raises
        # AttributeError rather than the TypeError below -- preserved as-is.
        so = self.utcoffset()
        oo = other.utcoffset()
        if (so is not None and oo is None) or (so is None and oo is not None):
            raise TypeError("can't compare offset-naive and offset-aware datetimes")
        # Year 0 predates every datetime.datetime (whose years start at 1).
        if isinstance(other, datetime):
            return -1
        if not isinstance(other, self.__class__):
            self._comparison_error(other)
        st = (
            self.year,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
            self.microsecond,
            so
        )
        ot = (
            other.year,
            other.month,
            other.day,
            other.hour,
            other.minute,
            other.second,
            other.microsecond,
            oo
        )
        if st < ot:
            return -1
        if st > ot:
            return 1
        return 0
    def __lt__(self, other):
        return self.__cmp__(other) < 0
    def __le__(self, other):
        return self.__cmp__(other) <= 0
    def __gt__(self, other):
        return self.__cmp__(other) > 0
    def __ge__(self, other):
        return self.__cmp__(other) >= 0
| {
"content_hash": "353d6016072618a223980c7689126660",
"timestamp": "",
"source": "github",
"line_count": 710,
"max_line_length": 97,
"avg_line_length": 25.388732394366198,
"alnum_prop": 0.5137579052479752,
"repo_name": "xq262144/hue",
"id": "2e55ef85834881f80376f3be871c58980a0ecedb",
"size": "18043",
"binary": false,
"copies": "25",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/asn1crypto-0.22.0/asn1crypto/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2692409"
},
{
"name": "C++",
"bytes": "199897"
},
{
"name": "CSS",
"bytes": "521820"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "Groff",
"bytes": "16669"
},
{
"name": "HTML",
"bytes": "24188238"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "4987047"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "144341"
},
{
"name": "Mako",
"bytes": "3052598"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "44291483"
},
{
"name": "Shell",
"bytes": "44147"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "518588"
}
],
"symlink_target": ""
} |
"""Default tags used by the template system, available to all templates."""
import sys
import re
from itertools import groupby, cycle as itertools_cycle
from google.appengine._internal.django.template import Node, NodeList, Template, Context, Variable
from google.appengine._internal.django.template import TemplateSyntaxError, VariableDoesNotExist, BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END, SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END
from google.appengine._internal.django.template import get_library, Library, InvalidTemplateLibrary
from google.appengine._internal.django.template.smartif import IfParser, Literal
from google.appengine._internal.django.conf import settings
from google.appengine._internal.django.utils.encoding import smart_str, smart_unicode
from google.appengine._internal.django.utils.safestring import mark_safe
# Tag registry that all tags defined in this module attach to.
register = Library()
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
class AutoEscapeControlNode(Node):
    """Implements the actions of the autoescape tag."""
    def __init__(self, setting, nodelist):
        self.setting = setting
        self.nodelist = nodelist
    def render(self, context):
        # Temporarily swap the context's autoescape flag while rendering
        # the enclosed nodes, then restore the previous value.
        previous = context.autoescape
        context.autoescape = self.setting
        rendered = self.nodelist.render(context)
        context.autoescape = previous
        return mark_safe(rendered) if self.setting else rendered
class CommentNode(Node):
    # {% comment %} blocks render to nothing.
    def render(self, context):
        return ''
class CsrfTokenNode(Node):
    """Implements {% csrf_token %}: emits a hidden input carrying the CSRF
    token taken from the rendering context."""
    def render(self, context):
        csrf_token = context.get('csrf_token', None)
        if csrf_token:
            if csrf_token == 'NOTPROVIDED':
                # Sentinel meaning the token was deliberately withheld;
                # emit nothing but keep the result marked safe.
                return mark_safe(u"")
            else:
                return mark_safe(u"<div style='display:none'><input type='hidden' name='csrfmiddlewaretoken' value='%s' /></div>" % csrf_token)
        else:
            # It's very probable that the token is missing because of
            # misconfiguration, so we raise a warning
            from google.appengine._internal.django.conf import settings
            if settings.DEBUG:
                import warnings
                warnings.warn("A {% csrf_token %} was used in a template, but the context did not provide the value.  This is usually caused by not using RequestContext.")
            return u''
class CycleNode(Node):
    """Implements {% cycle %}: yields the next value from a fixed list on
    every render, optionally storing it in a named context variable."""
    def __init__(self, cyclevars, variable_name=None):
        self.cyclevars = cyclevars
        self.variable_name = variable_name
    def render(self, context):
        # One itertools.cycle iterator per render pass, stored on the
        # render_context so concurrent renders don't share position.
        if self not in context.render_context:
            context.render_context[self] = itertools_cycle(self.cyclevars)
        cycle_iter = context.render_context[self]
        # The next() builtin (Python 2.6+) replaces the Python-2-only
        # .next() method call, keeping the tag usable under Python 3.
        value = next(cycle_iter).resolve(context)
        if self.variable_name:
            context[self.variable_name] = value
        return value
class DebugNode(Node):
    """Implements {% debug %}: dumps the context layers and the loaded
    module table, pretty-printed."""
    def render(self, context):
        from pprint import pformat
        pieces = [pformat(layer) for layer in context]
        pieces += ['\n\n', pformat(sys.modules)]
        return ''.join(pieces)
class FilterNode(Node):
    """Implements {% filter %}: renders its contents and pipes the result
    through the stored filter expression."""
    def __init__(self, filter_expr, nodelist):
        self.filter_expr, self.nodelist = filter_expr, nodelist
    def render(self, context):
        output = self.nodelist.render(context)
        # Apply filters.
        # The rendered output is pushed onto the context as `var` so the
        # stored expression can resolve against it.
        context.update({'var': output})
        filtered = self.filter_expr.resolve(context)
        context.pop()
        return filtered
class FirstOfNode(Node):
    """Implements {% firstof %}: outputs the first truthy argument, or an
    empty string when none are truthy."""
    def __init__(self, vars):
        self.vars = vars
    def render(self, context):
        for var in self.vars:
            # Second argument presumably suppresses resolution failures so
            # missing variables count as falsy -- TODO confirm against
            # Variable.resolve's signature.
            value = var.resolve(context, True)
            if value:
                return smart_unicode(value)
        return u''
class ForNode(Node):
    """Implements {% for %}: iterates a sequence, exposing loop metadata
    through the ``forloop`` context variable, with an optional
    {% empty %} branch rendered for empty sequences."""
    child_nodelists = ('nodelist_loop', 'nodelist_empty')
    def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None):
        self.loopvars, self.sequence = loopvars, sequence
        self.is_reversed = is_reversed
        self.nodelist_loop = nodelist_loop
        if nodelist_empty is None:
            self.nodelist_empty = NodeList()
        else:
            self.nodelist_empty = nodelist_empty
    def __repr__(self):
        reversed_text = self.is_reversed and ' reversed' or ''
        return "<For Node: for %s in %s, tail_len: %d%s>" % (', '.join(self.loopvars), self.sequence, len(self.nodelist_loop),
            reversed_text)
    def __iter__(self):
        # Yield child nodes from both branches so tree walkers see them all.
        for node in self.nodelist_loop:
            yield node
        for node in self.nodelist_empty:
            yield node
    def render(self, context):
        # Remember any enclosing loop's state so nested loops can expose
        # it as forloop.parentloop.
        if 'forloop' in context:
            parentloop = context['forloop']
        else:
            parentloop = {}
        context.push()
        try:
            values = self.sequence.resolve(context, True)
        except VariableDoesNotExist:
            values = []
        if values is None:
            values = []
        if not hasattr(values, '__len__'):
            # Generators etc. must be materialized: the bookkeeping below
            # needs len() for revcounter/last.
            values = list(values)
        len_values = len(values)
        if len_values < 1:
            context.pop()
            return self.nodelist_empty.render(context)
        nodelist = NodeList()
        if self.is_reversed:
            values = reversed(values)
        unpack = len(self.loopvars) > 1
        # Create a forloop value in the context.  We'll update counters on each
        # iteration just below.
        loop_dict = context['forloop'] = {'parentloop': parentloop}
        for i, item in enumerate(values):
            # Shortcuts for current loop iteration number.
            loop_dict['counter0'] = i
            loop_dict['counter'] = i+1
            # Reverse counter iteration numbers.
            loop_dict['revcounter'] = len_values - i
            loop_dict['revcounter0'] = len_values - i - 1
            # Boolean values designating first and last times through loop.
            loop_dict['first'] = (i == 0)
            loop_dict['last'] = (i == len_values - 1)
            pop_context = False
            if unpack:
                # If there are multiple loop variables, unpack the item into
                # them.
                try:
                    unpacked_vars = dict(zip(self.loopvars, item))
                except TypeError:
                    pass
                else:
                    pop_context = True
                    context.update(unpacked_vars)
            else:
                context[self.loopvars[0]] = item
            for node in self.nodelist_loop:
                nodelist.append(node.render(context))
            if pop_context:
                # The loop variables were pushed on to the context so pop them
                # off again. This is necessary because the tag lets the length
                # of loopvars differ to the length of each set of items and we
                # don't want to leave any vars from the previous loop on the
                # context.
                context.pop()
        context.pop()
        return nodelist.render(context)
class IfChangedNode(Node):
    """Implements {% ifchanged %}: renders its contents only when the
    watched value differs from the previous iteration's value."""
    child_nodelists = ('nodelist_true', 'nodelist_false')
    def __init__(self, nodelist_true, nodelist_false, *varlist):
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
        self._last_seen = None
        self._varlist = varlist
        self._id = str(id(self))
    def render(self, context):
        # Reset state at the start of each enclosing loop so a node reused
        # across renders does not remember a previous render's value.
        if 'forloop' in context and self._id not in context['forloop']:
            self._last_seen = None
            context['forloop'][self._id] = 1
        try:
            if self._varlist:
                # Consider multiple parameters. This automatically behaves
                # like an OR evaluation of the multiple variables.
                compare_to = [var.resolve(context, True) for var in self._varlist]
            else:
                compare_to = self.nodelist_true.render(context)
        except VariableDoesNotExist:
            compare_to = None
        if compare_to != self._last_seen:
            # The original computed an unused `firstloop` flag (with the
            # non-idiomatic `== None`) here; it has been removed.
            self._last_seen = compare_to
            content = self.nodelist_true.render(context)
            return content
        elif self.nodelist_false:
            return self.nodelist_false.render(context)
        return ''
class IfEqualNode(Node):
    """Implements {% ifequal %} / {% ifnotequal %}: renders one of two node
    lists depending on whether two resolved values compare equal."""
    child_nodelists = ('nodelist_true', 'nodelist_false')
    def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
        self.var1 = var1
        self.var2 = var2
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false
        self.negate = negate
    def __repr__(self):
        return "<IfEqualNode>"
    def render(self, context):
        first = self.var1.resolve(context, True)
        second = self.var2.resolve(context, True)
        # negate flips the comparison: ifnotequal wants inequality.
        if self.negate:
            matched = first != second
        else:
            matched = first == second
        if matched:
            return self.nodelist_true.render(context)
        return self.nodelist_false.render(context)
class IfNode(Node):
    """Implements {% if %}: renders the true branch when the condition
    evaluates truthy, otherwise the false branch."""
    child_nodelists = ('nodelist_true', 'nodelist_false')
    def __init__(self, var, nodelist_true, nodelist_false=None):
        # NOTE(review): if nodelist_false is left as the default None,
        # __iter__() and render() would fail on it; presumably the parser
        # always passes a NodeList -- confirm before relying on the default.
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
        self.var = var
    def __repr__(self):
        return "<If node>"
    def __iter__(self):
        for node in self.nodelist_true:
            yield node
        for node in self.nodelist_false:
            yield node
    def render(self, context):
        try:
            # self.var is a smartif-parsed condition; eval() resolves it
            # against the context.
            var = self.var.eval(context)
        except VariableDoesNotExist:
            var = None
        if var:
            return self.nodelist_true.render(context)
        else:
            return self.nodelist_false.render(context)
class RegroupNode(Node):
    """Implements {% regroup %}: groups a list of alike objects by a common
    expression and stores the grouped result in a context variable."""
    def __init__(self, target, expression, var_name):
        self.target, self.expression = target, expression
        self.var_name = var_name
    def render(self, context):
        obj_list = self.target.resolve(context, True)
        # `is None` replaces the original `== None`: identity is the correct
        # test for a missing value and cannot be hijacked by a custom __eq__.
        if obj_list is None:
            # target variable wasn't found in context; fail silently.
            context[self.var_name] = []
            return ''
        # List of dictionaries in the format:
        # {'grouper': 'key', 'list': [list of contents]}.
        context[self.var_name] = [
            {'grouper': key, 'list': list(val)}
            for key, val in
            groupby(obj_list, lambda v, f=self.expression.resolve: f(v, True))
        ]
        return ''
def include_is_allowed(filepath):
    """Return True when ``filepath`` starts with one of the configured
    ALLOWED_INCLUDE_ROOTS prefixes."""
    return any(filepath.startswith(root)
               for root in settings.ALLOWED_INCLUDE_ROOTS)
class SsiNode(Node):
    """Implements {% ssi %}: inlines the contents of a file from disk,
    optionally parsing it as a template first."""
    def __init__(self, filepath, parsed):
        self.filepath, self.parsed = filepath, parsed
    def render(self, context):
        if not include_is_allowed(self.filepath):
            if settings.DEBUG:
                return "[Didn't have permission to include file]"
            else:
                return '' # Fail silently for invalid includes.
        try:
            fp = open(self.filepath, 'r')
            # Ensure the handle is closed even if read() fails.
            try:
                output = fp.read()
            finally:
                fp.close()
        except IOError:
            output = ''
        if self.parsed:
            try:
                t = Template(output, name=self.filepath)
                return t.render(context)
            # `except ... as e` replaces the Python-2-only `except ..., e`
            # spelling; it is valid on Python 2.6+ and required on Python 3.
            except TemplateSyntaxError as e:
                if settings.DEBUG:
                    return "[Included template had syntax error: %s]" % e
                else:
                    return '' # Fail silently for invalid included templates.
        return output
class LoadNode(Node):
    # {% load %} does its work at parse time; rendering emits nothing.
    def render(self, context):
        return ''
class NowNode(Node):
    """Implements {% now %}: renders the current date/time formatted with
    the given Django date-format string."""
    def __init__(self, format_string):
        self.format_string = format_string
    def render(self, context):
        from datetime import datetime
        from google.appengine._internal.django.utils.dateformat import DateFormat
        df = DateFormat(datetime.now())
        return df.format(self.format_string)
class SpacelessNode(Node):
    """Strips whitespace appearing between HTML tags in the rendered output
    of the enclosed block."""

    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        from google.appengine._internal.django.utils.html import strip_spaces_between_tags
        rendered = self.nodelist.render(context).strip()
        return strip_spaces_between_tags(rendered)
class TemplateTagNode(Node):
    """Emits one of the literal pieces of template syntax ({%, %}, {{, ...)."""

    # Maps the {% templatetag %} argument to the literal text it stands for.
    mapping = {
        'openblock': BLOCK_TAG_START,
        'closeblock': BLOCK_TAG_END,
        'openvariable': VARIABLE_TAG_START,
        'closevariable': VARIABLE_TAG_END,
        'openbrace': SINGLE_BRACE_START,
        'closebrace': SINGLE_BRACE_END,
        'opencomment': COMMENT_TAG_START,
        'closecomment': COMMENT_TAG_END,
    }

    def __init__(self, tagtype):
        self.tagtype = tagtype

    def render(self, context):
        # Unknown tag types render as the empty string rather than raising.
        return self.mapping.get(self.tagtype, '')
class URLNode(Node):
    """Node for the {% url %} tag.

    Reverses ``view_name`` with the resolved args/kwargs.  If ``asvar`` is
    set, the URL is stored in the context under that name and nothing is
    rendered; otherwise the URL itself is rendered.
    """
    def __init__(self, view_name, args, kwargs, asvar):
        self.view_name = view_name
        self.args = args
        self.kwargs = kwargs
        self.asvar = asvar
    def render(self, context):
        from google.appengine._internal.django.core.urlresolvers import reverse, NoReverseMatch
        args = [arg.resolve(context) for arg in self.args]
        # Keyword names must be plain ASCII byte strings to be usable as
        # **kwargs in the reverse() call.
        kwargs = dict([(smart_str(k,'ascii'), v.resolve(context))
                       for k, v in self.kwargs.items()])
        # Try to look up the URL twice: once given the view name, and again
        # relative to what we guess is the "main" app. If they both fail,
        # re-raise the NoReverseMatch unless we're using the
        # {% url ... as var %} construct in which case return nothing.
        url = ''
        try:
            url = reverse(self.view_name, args=args, kwargs=kwargs, current_app=context.current_app)
        except NoReverseMatch, e:
            if settings.SETTINGS_MODULE:
                project_name = settings.SETTINGS_MODULE.split('.')[0]
                try:
                    url = reverse(project_name + '.' + self.view_name,
                                  args=args, kwargs=kwargs, current_app=context.current_app)
                except NoReverseMatch:
                    if self.asvar is None:
                        # Re-raise the original exception, not the one with
                        # the path relative to the project. This makes a
                        # better error message.
                        raise e
            else:
                if self.asvar is None:
                    raise e
        # With "as var", failures above fall through with url == ''.
        if self.asvar:
            context[self.asvar] = url
            return ''
        else:
            return url
class WidthRatioNode(Node):
    """Node for {% widthratio %}: renders round(value / maxvalue * max_width)
    as an integer string, or '' on missing/non-numeric input."""

    def __init__(self, val_expr, max_expr, max_width):
        self.val_expr = val_expr
        self.max_expr = max_expr
        self.max_width = max_width

    def render(self, context):
        try:
            value = self.val_expr.resolve(context)
            maxvalue = self.max_expr.resolve(context)
            max_width = int(self.max_width.resolve(context))
        except VariableDoesNotExist:
            # Missing variables fail silently.
            return ''
        except ValueError:
            # Bug fix: corrected the error message grammar ("an number").
            raise TemplateSyntaxError("widthratio final argument must be a number")
        try:
            value = float(value)
            maxvalue = float(maxvalue)
            ratio = (value / maxvalue) * max_width
        except (ValueError, ZeroDivisionError):
            # Non-numeric values or a zero maximum fail silently.
            return ''
        return str(int(round(ratio)))
class WithNode(Node):
    """Node for {% with %}: binds a resolved value to a name in a pushed
    context scope for the duration of the enclosed block."""

    def __init__(self, var, name, nodelist):
        self.var = var
        self.name = name
        self.nodelist = nodelist

    def __repr__(self):
        return "<WithNode>"

    def render(self, context):
        resolved = self.var.resolve(context)
        context.push()
        context[self.name] = resolved
        rendered = self.nodelist.render(context)
        context.pop()
        return rendered
#@register.tag
def autoescape(parser, token):
    """
    Force autoescape behaviour for this block.
    """
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
    setting = bits[1]
    if setting not in (u'on', u'off'):
        raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
    nodelist = parser.parse(('endautoescape',))
    parser.delete_first_token()
    return AutoEscapeControlNode(setting == 'on', nodelist)
autoescape = register.tag(autoescape)
#@register.tag
def comment(parser, token):
    """
    Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
    """
    # The enclosed tokens are discarded entirely at parse time.
    parser.skip_past('endcomment')
    return CommentNode()
comment = register.tag(comment)
#@register.tag
def cycle(parser, token):
    """
    Cycles among the given strings each time this tag is encountered.

    Within a loop, cycles among the given strings each time through
    the loop::

        {% for o in some_list %}
            <tr class="{% cycle 'row1' 'row2' %}">
                ...
            </tr>
        {% endfor %}

    Outside of a loop, give the values a unique name the first time you call
    it, then use that name each successive time through::

        <tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
        <tr class="{% cycle rowcolors %}">...</tr>
        <tr class="{% cycle rowcolors %}">...</tr>

    You can use any number of values, separated by spaces. Commas can also
    be used to separate values; if a comma is used, the cycle values are
    interpreted as literal strings.
    """
    # Note: This returns the exact same node on each {% cycle name %} call;
    # that is, the node object returned from {% cycle a b c as name %} and the
    # one returned from {% cycle name %} are the exact same object. This
    # shouldn't cause problems (heh), but if it does, now you know.
    #
    # Ugly hack warning: This stuffs the named template dict into parser so
    # that names are only unique within each template (as opposed to using
    # a global variable, which would make cycle names have to be unique across
    # *all* templates.
    args = token.split_contents()
    if len(args) < 2:
        raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
    if ',' in args[1]:
        # Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}
        # case: comma-separated values are re-quoted as literal strings.
        args[1:2] = ['"%s"' % arg for arg in args[1].split(",")]
    if len(args) == 2:
        # {% cycle foo %} case: look up a previously named cycle node.
        name = args[1]
        if not hasattr(parser, '_namedCycleNodes'):
            raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name)
        if not name in parser._namedCycleNodes:
            raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
        return parser._namedCycleNodes[name]
    if len(args) > 4 and args[-2] == 'as':
        # {% cycle a b c as name %} case: create the node and remember it
        # on the parser under the given name.
        name = args[-1]
        values = [parser.compile_filter(arg) for arg in args[1:-2]]
        node = CycleNode(values, name)
        if not hasattr(parser, '_namedCycleNodes'):
            parser._namedCycleNodes = {}
        parser._namedCycleNodes[name] = node
    else:
        # Anonymous {% cycle a b c %} case.
        values = [parser.compile_filter(arg) for arg in args[1:]]
        node = CycleNode(values)
    return node
cycle = register.tag(cycle)
def csrf_token(parser, token):
    """Returns a node rendering the CSRF token for the current request."""
    return CsrfTokenNode()
register.tag(csrf_token)
def debug(parser, token):
    """
    Outputs a whole load of debugging information, including the current
    context and imported modules.

    Sample usage::

        <pre>
        {% debug %}
        </pre>
    """
    # All the work happens at render time inside DebugNode.
    return DebugNode()
debug = register.tag(debug)
#@register.tag(name="filter")
def do_filter(parser, token):
    """
    Filters the contents of the enclosed block through variable filters,
    which may be chained and take arguments, exactly as in variable syntax.

    Sample usage::

        {% filter force_escape|lower %}
            This text will be HTML-escaped, and will appear in lowercase.
        {% endfilter %}
    """
    _, rest = token.contents.split(None, 1)
    # Compile the filter chain against a placeholder variable.
    filter_expr = parser.compile_filter("var|%s" % (rest))
    for func, unused in filter_expr.filters:
        filter_name = getattr(func, '_decorated_function', func).__name__
        if filter_name in ('escape', 'safe'):
            raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % func.__name__)
    nodelist = parser.parse(('endfilter',))
    parser.delete_first_token()
    return FilterNode(filter_expr, nodelist)
do_filter = register.tag("filter", do_filter)
#@register.tag
def firstof(parser, token):
    """
    Outputs the first passed variable that is not False, without escaping;
    outputs nothing if every one is False::

        {% firstof var1 var2 var3 "fallback value" %}

    A literal string may be supplied as a final fallback.  To escape the
    output, wrap the tag in a ``{% filter force_escape %}`` block.
    """
    candidates = token.split_contents()[1:]
    if not candidates:
        raise TemplateSyntaxError("'firstof' statement requires at least one argument")
    return FirstOfNode([parser.compile_filter(bit) for bit in candidates])
firstof = register.tag(firstof)
#@register.tag(name="for")
def do_for(parser, token):
    """
    Loops over each item in an array::

        {% for athlete in athlete_list %}
            <li>{{ athlete.name }}</li>
        {% endfor %}

    Supports reverse iteration (``{% for obj in list reversed %}``),
    unpacking multiple loop variables (``{% for key,value in dict.items %}``)
    and an optional ``{% empty %}`` clause rendered when the sequence is
    empty or missing.  Within the loop, ``forloop`` exposes ``counter``,
    ``counter0``, ``revcounter``, ``revcounter0``, ``first``, ``last`` and
    ``parentloop``.
    """
    bits = token.contents.split()
    if len(bits) < 4:
        raise TemplateSyntaxError("'for' statements should have at least four"
                                  " words: %s" % token.contents)
    is_reversed = bits[-1] == 'reversed'
    # 'in' sits one slot earlier when a trailing 'reversed' is present.
    in_index = -3 if is_reversed else -2
    if bits[in_index] != 'in':
        raise TemplateSyntaxError("'for' statements should use the format"
                                  " 'for x in y': %s" % token.contents)
    # Normalize "a, b" / "a ,b" to "a,b" before splitting the loop vars.
    loopvars = re.sub(r' *, *', ',', ' '.join(bits[1:in_index])).split(',')
    for var in loopvars:
        if not var or ' ' in var:
            raise TemplateSyntaxError("'for' tag received an invalid argument:"
                                      " %s" % token.contents)
    sequence = parser.compile_filter(bits[in_index + 1])
    body_nodelist = parser.parse(('empty', 'endfor',))
    token = parser.next_token()
    if token.contents == 'empty':
        empty_nodelist = parser.parse(('endfor',))
        parser.delete_first_token()
    else:
        empty_nodelist = None
    return ForNode(loopvars, sequence, is_reversed, body_nodelist,
                   empty_nodelist)
do_for = register.tag("for", do_for)
def do_ifequal(parser, token, negate):
    """Shared implementation for {% ifequal %} and {% ifnotequal %}."""
    bits = list(token.split_contents())
    if len(bits) != 3:
        raise TemplateSyntaxError("%r takes two arguments" % bits[0])
    end_tag = 'end' + bits[0]
    nodelist_true = parser.parse(('else', end_tag))
    token = parser.next_token()
    if token.contents == 'else':
        nodelist_false = parser.parse((end_tag,))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    return IfEqualNode(parser.compile_filter(bits[1]),
                       parser.compile_filter(bits[2]),
                       nodelist_true, nodelist_false, negate)
#@register.tag
def ifequal(parser, token):
    """
    Outputs the contents of the block if the two arguments equal each other::

        {% ifequal user.id comment.user_id %}
            ...
        {% endifequal %}

    An ``{% else %}`` clause may be supplied, as with ``ifnotequal``.
    """
    return do_ifequal(parser, token, False)
ifequal = register.tag(ifequal)
#@register.tag
def ifnotequal(parser, token):
    """
    Outputs the contents of the block if the two arguments are not equal;
    see ``ifequal`` for the full syntax.
    """
    return do_ifequal(parser, token, True)
ifnotequal = register.tag(ifnotequal)
class TemplateLiteral(Literal):
    """Literal wrapping a compiled filter expression, keeping the original
    source text around for error messages."""

    def __init__(self, value, text):
        self.value = value
        self.text = text  # for better error messages

    def display(self):
        return self.text

    def eval(self, context):
        # Resolution failures evaluate to None instead of raising.
        return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
    """IfParser subclass whose variables are compiled template filter
    expressions, raising TemplateSyntaxError on parse problems."""

    error_class = TemplateSyntaxError

    def __init__(self, parser, *args, **kwargs):
        # Remember the template parser so create_var can compile filters.
        self.template_parser = parser
        # Fix: __init__ should not return a value; the old code returned the
        # (None) result of the superclass initialiser.
        super(TemplateIfParser, self).__init__(*args, **kwargs)

    def create_var(self, value):
        """Wrap a raw token in a TemplateLiteral with its compiled filter."""
        return TemplateLiteral(self.template_parser.compile_filter(value), value)
#@register.tag(name="if")
def do_if(parser, token):
    """
    Outputs the contents of the block if the expression is "true" — i.e. it
    exists, is non-empty and is not a false boolean value::

        {% if athlete_list %}
            Number of athletes: {{ athlete_list|count }}
        {% else %}
            No athletes.
        {% endif %}

    Expressions may combine variables and filters with ``or``, ``and`` and
    ``not``, plus the comparison operators ``==`` (or ``=``), ``!=``, ``>``,
    ``>=``, ``<``, ``<=``, ``in`` and ``not in``, e.g.
    ``{% if articles|length >= 5 %}``.  Operators and arguments must be
    separated by spaces (``{% if 1>2 %}`` is invalid); operator precedence
    follows Python.
    """
    expression_bits = token.split_contents()[1:]
    condition = TemplateIfParser(parser, expression_bits).parse()
    nodelist_true = parser.parse(('else', 'endif'))
    token = parser.next_token()
    if token.contents == 'else':
        nodelist_false = parser.parse(('endif',))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    return IfNode(condition, nodelist_true, nodelist_false)
do_if = register.tag("if", do_if)
#@register.tag
def ifchanged(parser, token):
    """
    Checks if a value has changed since the last iteration of a loop; only
    meaningful inside a loop.  With no arguments, it compares its own
    rendered contents against the previous iteration and only renders them
    on change::

        {% for date in days %}
            {% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
        {% endfor %}

    With arguments, it renders whenever any of the given variables change::

        {% ifchanged date.hour date.date %}
            {{ date.hour }}
        {% endifchanged %}
    """
    args = token.contents.split()
    nodelist_true = parser.parse(('else', 'endifchanged'))
    token = parser.next_token()
    if token.contents == 'else':
        nodelist_false = parser.parse(('endifchanged',))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    tracked = [parser.compile_filter(arg) for arg in args[1:]]
    return IfChangedNode(nodelist_true, nodelist_false, *tracked)
ifchanged = register.tag(ifchanged)
#@register.tag
def ssi(parser, token):
    """
    Outputs the contents of a given file into the page::

        {% ssi /home/html/ljworld.com/includes/right_generic.html %}

    The file must be given as an absolute path.  With the optional second
    argument ``parsed``, the file contents are evaluated as template code
    with the current context::

        {% ssi /home/html/ljworld.com/includes/right_generic.html parsed %}
    """
    bits = token.contents.split()
    parsed = False
    if len(bits) not in (2, 3):
        raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
                                  " the file to be included")
    if len(bits) == 3:
        if bits[2] != 'parsed':
            raise TemplateSyntaxError("Second (optional) argument to %s tag"
                                      " must be 'parsed'" % bits[0])
        parsed = True
    return SsiNode(bits[1], parsed)
ssi = register.tag(ssi)
#@register.tag
def load(parser, token):
    """
    Loads a custom template tag set.

    For example, to load the template tags in
    ``django/templatetags/news/photos.py``::

        {% load news.photos %}
    """
    bits = token.contents.split()
    # Each argument after the tag name is a library to register.
    for taglib in bits[1:]:
        # add the library to the parser
        try:
            lib = get_library(taglib)
            parser.add_library(lib)
        except InvalidTemplateLibrary, e:
            raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
                                      (taglib, e))
    # Loading happens at parse time; the returned node renders nothing.
    return LoadNode()
load = register.tag(load)
#@register.tag
def now(parser, token):
    """
    Displays the current date, formatted according to the given string.

    Uses the same format as PHP's ``date()`` function; see http://php.net/date
    for all the possible values.

    Sample usage::

        It is {% now "jS F Y H:i" %}
    """
    # Splitting on '"' yields exactly three pieces for: now "FORMAT"
    pieces = token.contents.split('"')
    if len(pieces) != 3:
        raise TemplateSyntaxError("'now' statement takes one argument")
    return NowNode(pieces[1])
now = register.tag(now)
#@register.tag
def regroup(parser, token):
    """
    Regroups a list of alike objects by a common attribute::

        {% regroup people by gender as grouped %}

    populates ``grouped`` with a list of objects carrying ``grouper`` (the
    value that was grouped by) and ``list`` (the items sharing that value)
    attributes::

        {% for group in grouped %}
            <li>{{ group.grouper }}
                <ul>
                {% for item in group.list %}<li>{{ item }}</li>{% endfor %}
                </ul>
        {% endfor %}

    The input must already be sorted by the grouping key; if it isn't, sort
    it first, e.g.::

        {% regroup people|dictsort:"gender" by gender as grouped %}
    """
    firstbits = token.contents.split(None, 3)
    if len(firstbits) != 4:
        raise TemplateSyntaxError("'regroup' tag takes five arguments")
    target = parser.compile_filter(firstbits[1])
    if firstbits[2] != 'by':
        raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
    # Split the tail from the right so the grouping expression may itself
    # contain spaces: "<expression> as <var_name>".
    lastbits = firstbits[3].rsplit(None, 2)
    if lastbits[-2] != 'as':
        raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must"
                                  " be 'as'")
    expression = parser.compile_filter(lastbits[-3])
    var_name = lastbits[-1]
    return RegroupNode(target, expression, var_name)
regroup = register.tag(regroup)
def spaceless(parser, token):
    """
    Removes whitespace between HTML tags, including tab and newline
    characters::

        {% spaceless %}
            <p>
                <a href="foo/">Foo</a>
            </p>
        {% endspaceless %}

    renders as ``<p><a href="foo/">Foo</a></p>``.  Only whitespace between
    *tags* is normalized — whitespace between a tag and text is kept, so in::

        {% spaceless %}
            <strong>
                Hello
            </strong>
        {% endspaceless %}

    the space around ``Hello`` is not stripped.
    """
    nodelist = parser.parse(('endspaceless',))
    parser.delete_first_token()
    return SpacelessNode(nodelist)
spaceless = register.tag(spaceless)
#@register.tag
def templatetag(parser, token):
    """
    Outputs one of the literal bits used to compose template tags (the
    template system has no concept of "escaping" these).  Valid arguments:
    ``openblock`` ({%), ``closeblock`` (%}), ``openvariable`` ({{),
    ``closevariable`` (}}), ``openbrace`` ({), ``closebrace`` (}),
    ``opencomment`` ({#) and ``closecomment`` (#}).
    """
    args = token.contents.split()
    if len(args) != 2:
        raise TemplateSyntaxError("'templatetag' statement takes one argument")
    tag_name = args[1]
    if tag_name not in TemplateTagNode.mapping:
        raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
                                  " Must be one of: %s" %
                                  (tag_name, TemplateTagNode.mapping.keys()))
    return TemplateTagNode(tag_name)
templatetag = register.tag(templatetag)
def url(parser, token):
    """
    Returns an absolute URL matching given view with its parameters.

    This is a way to define links that aren't tied to a particular URL
    configuration::

        {% url path.to.some_view arg1 arg2 %}

    or::

        {% url path.to.some_view name1=value1 name2=value2 %}

    The first argument is a path to a view. It can be an absolute python path
    or just ``app_name.view_name`` without the project name if the view is
    located inside the project. Other arguments are comma-separated values
    that will be filled in place of positional and keyword arguments in the
    URL. All arguments for the URL should be present.

    For example if you have a view ``app_name.client`` taking client's id and
    the corresponding line in a URLconf looks like this::

        ('^client/(\d+)/$', 'app_name.client')

    and this app's URLconf is included into the project's URLconf under some
    path::

        ('^clients/', include('project_name.app_name.urls'))

    then in a template you can create a link for a certain client like this::

        {% url app_name.client client.id %}

    The URL will look like ``/clients/client/123/``.
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError("'%s' takes at least one argument"
                                  " (path to a view)" % bits[0])
    viewname = bits[1]
    args = []
    kwargs = {}
    asvar = None
    bits = bits[2:]
    # Optional trailing "... as var" stores the URL instead of rendering it.
    if len(bits) >= 2 and bits[-2] == 'as':
        asvar = bits[-1]
        bits = bits[:-2]
    # Backwards compatibility: check for the old comma separated format
    # {% url urlname arg1,arg2 %}
    # Initial check - that the first space separated bit has a comma in it
    if bits and ',' in bits[0]:
        check_old_format = True
        # In order to *really* be old format, there must be a comma
        # in *every* space separated bit, except the last.
        for bit in bits[1:-1]:
            if ',' not in bit:
                # No comma in this bit. Either the comma we found
                # in bit 1 was a false positive (e.g., comma in a string),
                # or there is a syntax problem with missing commas
                check_old_format = False
                break
    else:
        # No comma found - must be new format.
        check_old_format = False
    if check_old_format:
        # Confirm that this is old format by trying to parse the first
        # argument. An exception will be raised if the comma is
        # unexpected (i.e. outside of a static string).
        match = kwarg_re.match(bits[0])
        if match:
            value = match.groups()[1]
            try:
                parser.compile_filter(value)
            except TemplateSyntaxError:
                # Old format confirmed: re-split every argument on commas.
                bits = ''.join(bits).split(',')
    # Now all the bits are parsed into new format,
    # process them as template vars
    if len(bits):
        for bit in bits:
            match = kwarg_re.match(bit)
            if not match:
                raise TemplateSyntaxError("Malformed arguments to url tag")
            name, value = match.groups()
            if name:
                kwargs[name] = parser.compile_filter(value)
            else:
                args.append(parser.compile_filter(value))
    return URLNode(viewname, args, kwargs, asvar)
url = register.tag(url)
#@register.tag
def widthratio(parser, token):
    """
    For creating bar charts and such: calculates the ratio of a given value
    to a maximum value, and applies that ratio to a constant::

        <img src='bar.gif' height='10' width='{% widthratio this_value max_value 100 %}' />

    If ``this_value`` is 175 and ``max_value`` is 200, the width renders as
    88 (175/200 = .875; .875 * 100 = 87.5, rounded up to 88).
    """
    pieces = token.contents.split()
    if len(pieces) != 4:
        raise TemplateSyntaxError("widthratio takes three arguments")
    tag_name, value_arg, max_arg, width_arg = pieces
    return WidthRatioNode(parser.compile_filter(value_arg),
                          parser.compile_filter(max_arg),
                          parser.compile_filter(width_arg))
widthratio = register.tag(widthratio)
#@register.tag
def do_with(parser, token):
    """
    Adds a value to the context (inside of this block) for caching and easy
    access::

        {% with person.some_sql_method as total %}
            {{ total }} object{{ total|pluralize }}
        {% endwith %}
    """
    bits = list(token.split_contents())
    if len(bits) != 4 or bits[2] != "as":
        raise TemplateSyntaxError("%r expected format is 'value as name'" %
                                  bits[0])
    value = parser.compile_filter(bits[1])
    nodelist = parser.parse(('endwith',))
    parser.delete_first_token()
    return WithNode(value, bits[3], nodelist)
do_with = register.tag('with', do_with)
| {
"content_hash": "b55277631e5f364365e5eb84e4874e89",
"timestamp": "",
"source": "github",
"line_count": 1217,
"max_line_length": 240,
"avg_line_length": 35.16844700082169,
"alnum_prop": 0.5832009345794392,
"repo_name": "tungvx/deploy",
"id": "e9a9d2e0c2b8f1266802fe080398b6857416753d",
"size": "42800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".google_appengine/google/appengine/_internal/django/template/defaulttags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400492"
},
{
"name": "JavaScript",
"bytes": "477245"
},
{
"name": "Python",
"bytes": "16861113"
},
{
"name": "Shell",
"bytes": "8221"
}
],
"symlink_target": ""
} |
import simplejson as json
def find(element, search, path=None):
    """Depth-first search for ``search`` inside nested dicts/lists.

    Returns the list of keys/indices from the match back up to the root
    (leaf-first; the caller reverses it), or None when not found.

    ``path`` is an internal accumulator.  Bug fix: the old default of
    ``path=[]`` was a shared mutable default, so results from one call
    leaked into the next.
    """
    if path is None:
        path = []
    items = element.items() if isinstance(element, dict) else enumerate(element)
    for key, value in items:
        if isinstance(value, (dict, list)):
            if find(value, search, path):
                path.append(key)
                return path
        elif value == search:
            path.append(key)
            return path
if __name__ == "__main__":
    # Reads the puzzle input (a JSON document) and prints the key path to
    # the value "dailyprogrammer", root-first (find() returns it leaf-first,
    # hence the [::-1] reversal).
    with open("input/input3.txt", "r") as file:
        print(" -> ".join(map(str, find(json.load(file), "dailyprogrammer")[::-1])))
"content_hash": "eb680317907c96948a4834c1f63008c5",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 91,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.563302752293578,
"repo_name": "marcardioid/DailyProgrammer",
"id": "1e6e440c523d724fd1042c8e0b2d5eca431cd65e",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solutions/230_Easy/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "256"
},
{
"name": "HTML",
"bytes": "3716"
},
{
"name": "JavaScript",
"bytes": "116063"
},
{
"name": "Python",
"bytes": "59838"
}
],
"symlink_target": ""
} |
"""
Use Bayesian Inference to trigger a binary sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.bayesian/
"""
import asyncio
import logging
from collections import OrderedDict
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.binary_sensor import (
BinarySensorDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_ABOVE, CONF_BELOW, CONF_DEVICE_CLASS, CONF_ENTITY_ID, CONF_NAME,
CONF_PLATFORM, CONF_STATE, STATE_UNKNOWN)
from homeassistant.core import callback
from homeassistant.helpers import condition
from homeassistant.helpers.event import async_track_state_change
_LOGGER = logging.getLogger(__name__)
# State attribute names exposed by the sensor.
ATTR_OBSERVATIONS = 'observations'
ATTR_PROBABILITY = 'probability'
ATTR_PROBABILITY_THRESHOLD = 'probability_threshold'
# Configuration keys.
CONF_OBSERVATIONS = 'observations'
CONF_PRIOR = 'prior'
CONF_PROBABILITY_THRESHOLD = 'probability_threshold'
CONF_P_GIVEN_F = 'prob_given_false'
CONF_P_GIVEN_T = 'prob_given_true'
CONF_TO_STATE = 'to_state'
DEFAULT_NAME = "Bayesian Binary Sensor"
DEFAULT_PROBABILITY_THRESHOLD = 0.5
# Observation triggered when a numeric entity state lies inside a range.
NUMERIC_STATE_SCHEMA = vol.Schema({
    CONF_PLATFORM: 'numeric_state',
    vol.Required(CONF_ENTITY_ID): cv.entity_id,
    vol.Optional(CONF_ABOVE): vol.Coerce(float),
    vol.Optional(CONF_BELOW): vol.Coerce(float),
    vol.Required(CONF_P_GIVEN_T): vol.Coerce(float),
    vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float)
}, required=True)
# Observation triggered when an entity is in one exact state.
STATE_SCHEMA = vol.Schema({
    CONF_PLATFORM: CONF_STATE,
    vol.Required(CONF_ENTITY_ID): cv.entity_id,
    vol.Required(CONF_TO_STATE): cv.string,
    vol.Required(CONF_P_GIVEN_T): vol.Coerce(float),
    vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float)
}, required=True)
# Platform configuration: name, optional device class, the observation
# list, the prior probability and the on/off decision threshold.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_DEVICE_CLASS): cv.string,
    vol.Required(CONF_OBSERVATIONS):
        vol.Schema(vol.All(cv.ensure_list,
                           [vol.Any(NUMERIC_STATE_SCHEMA, STATE_SCHEMA)])),
    vol.Required(CONF_PRIOR): vol.Coerce(float),
    vol.Optional(CONF_PROBABILITY_THRESHOLD,
                 default=DEFAULT_PROBABILITY_THRESHOLD): vol.Coerce(float),
})
def update_probability(prior, prob_true, prob_false):
    """Update probability using Bayes' rule.

    ``prob_true``/``prob_false`` are P(observation | hypothesis true/false);
    the return value is the posterior probability of the hypothesis.
    """
    evidence_true = prob_true * prior
    evidence_false = prob_false * (1 - prior)
    return evidence_true / (evidence_true + evidence_false)
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Bayesian Binary sensor."""
    # Pull the options validated by PLATFORM_SCHEMA out of the config.
    name = config.get(CONF_NAME)
    observations = config.get(CONF_OBSERVATIONS)
    prior = config.get(CONF_PRIOR)
    probability_threshold = config.get(CONF_PROBABILITY_THRESHOLD)
    device_class = config.get(CONF_DEVICE_CLASS)
    # Second argument True requests an immediate first state update.
    async_add_devices([
        BayesianBinarySensor(
            name, prior, observations, probability_threshold, device_class)
    ], True)
class BayesianBinarySensor(BinarySensorDevice):
    """Representation of a Bayesian sensor.

    Tracks the configured observation entities, folds the currently
    triggered observations into the prior with Bayes' rule, and reports
    "on" when the posterior probability exceeds the threshold.
    """

    def __init__(self, name, prior, observations, probability_threshold,
                 device_class):
        """Initialize the Bayesian sensor."""
        self._name = name
        self._observations = observations
        self._probability_threshold = probability_threshold
        self._device_class = device_class
        self._deviation = False
        self.prior = prior
        self.probability = prior
        # Maps observation id -> {'prob_true', 'prob_false'} for the
        # observations that are currently triggered.
        self.current_obs = OrderedDict()
        to_observe = set(obs['entity_id'] for obs in self._observations)
        # Bug fix: dict.fromkeys(to_observe, []) handed the *same* list
        # object to every key, so each entity appeared to own every
        # observation. Build an independent list per entity instead.
        self.entity_obs = {entity_id: [] for entity_id in to_observe}
        for ind, obs in enumerate(self._observations):
            obs['id'] = ind
            self.entity_obs[obs['entity_id']].append(obs)
        # Dispatch table from observation platform to its processor.
        self.watchers = {
            'numeric_state': self._process_numeric_state,
            'state': self._process_state
        }

    @asyncio.coroutine
    def async_added_to_hass(self):
        """Call when entity about to be added."""
        @callback
        # pylint: disable=invalid-name
        def async_threshold_sensor_state_listener(entity, old_state,
                                                  new_state):
            """Handle sensor state changes."""
            if new_state.state == STATE_UNKNOWN:
                return
            # Re-evaluate only the observations attached to this entity.
            entity_obs_list = self.entity_obs[entity]
            for entity_obs in entity_obs_list:
                platform = entity_obs['platform']
                self.watchers[platform](entity_obs)
            # Fold every currently-triggered observation into the prior.
            prior = self.prior
            for obs in self.current_obs.values():
                prior = update_probability(
                    prior, obs['prob_true'], obs['prob_false'])
            self.probability = prior
            self.hass.async_add_job(self.async_update_ha_state, True)

        entities = [obs['entity_id'] for obs in self._observations]
        async_track_state_change(
            self.hass, entities, async_threshold_sensor_state_listener)

    def _update_current_obs(self, entity_observation, should_trigger):
        """Update current observation."""
        obs_id = entity_observation['id']
        if should_trigger:
            prob_true = entity_observation['prob_given_true']
            # prob_given_false defaults to the complement of prob_given_true.
            prob_false = entity_observation.get(
                'prob_given_false', 1 - prob_true)
            self.current_obs[obs_id] = {
                'prob_true': prob_true,
                'prob_false': prob_false
            }
        else:
            self.current_obs.pop(obs_id, None)

    def _process_numeric_state(self, entity_observation):
        """Add entity to current_obs if numeric state conditions are met."""
        entity = entity_observation['entity_id']
        should_trigger = condition.async_numeric_state(
            self.hass, entity,
            entity_observation.get('below'),
            entity_observation.get('above'), None, entity_observation)
        self._update_current_obs(entity_observation, should_trigger)

    def _process_state(self, entity_observation):
        """Add entity to current observations if state conditions are met."""
        entity = entity_observation['entity_id']
        should_trigger = condition.state(
            self.hass, entity, entity_observation.get('to_state'))
        self._update_current_obs(entity_observation, should_trigger)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def is_on(self):
        """Return true if sensor is on."""
        return self._deviation

    @property
    def should_poll(self):
        """No polling needed; updates are event-driven."""
        return False

    @property
    def device_class(self):
        """Return the sensor class of the sensor."""
        return self._device_class

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {
            ATTR_OBSERVATIONS: list(self.current_obs.values()),
            ATTR_PROBABILITY: round(self.probability, 2),
            ATTR_PROBABILITY_THRESHOLD: self._probability_threshold,
        }

    @asyncio.coroutine
    def async_update(self):
        """Get the latest data and update the states."""
        self._deviation = bool(self.probability > self._probability_threshold)
| {
"content_hash": "f299541b5eedd6449e3e2e4675adfee8",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 79,
"avg_line_length": 33.9,
"alnum_prop": 0.6440064360418343,
"repo_name": "stefan-jonasson/home-assistant",
"id": "f3dbc912ade1209f1760649c9379cbb93bb54564",
"size": "7458",
"binary": false,
"copies": "8",
"ref": "refs/heads/dev",
"path": "homeassistant/components/binary_sensor/bayesian.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4056"
},
{
"name": "Python",
"bytes": "8360711"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12658"
}
],
"symlink_target": ""
} |
from twisted.python import log
from autobahn.twisted import websocket
import logging
import os
import time
import pexpect
import sys
import threading
from universe.vncdriver.vnc_proxy_server import VNCProxyServer
from universe.rewarder.reward_proxy_server import RewardProxyServer
from universe import utils
logger = logging.getLogger(__name__)
class DualProxyServer(VNCProxyServer):
    """VNC proxy that also spawns and supervises a reward proxy process.

    On client init it launches the reward proxy binary (configured on the
    factory) and waits for it to come up; on close it terminates the reward
    proxy and, when a log manager exists, uploads the recorded logs to S3.
    """

    def __init__(self, action_queue=None, error_buffer=None, enable_logging=True):
        self._log_info('DualProxyServer inited')
        self.reward_proxy = None
        self.reward_proxy_thread = None
        # Assigned in recv_ClientInit; initialized here so close() cannot
        # hit an AttributeError if the connection dies before ClientInit.
        self.logfile_dir = None
        super(DualProxyServer, self).__init__(action_queue, error_buffer, enable_logging)

    def _log_info(self, msg, *args, **kwargs):
        """Log with a [dual_proxy] prefix for easy grepping."""
        logger.info('[dual_proxy] ' + msg, *args, **kwargs)

    def recv_ClientInit(self, block):
        # Start the reward proxy subprocess, mirroring its output to stdout.
        self._log_info('Starting reward proxy server')
        self.reward_proxy = pexpect.spawnu(self.factory.reward_proxy_bin,
                                           logfile=sys.stdout,
                                           timeout=None)

        # Block until the reward proxy announces itself in its log output.
        # Raw string: '\[' in a plain literal is an invalid escape sequence
        # (SyntaxWarning on modern Python).
        self._log_info('Waiting for reward proxy server')
        self.reward_proxy.expect(r'\[RewardProxyServer\]')
        # Drain subprocess output in the background until EOF so the pexpect
        # buffer does not fill up and stall the child.
        self.reward_proxy_thread = threading.Thread(target=lambda: self.reward_proxy.expect(pexpect.EOF))
        self.reward_proxy_thread.start()

        self._log_info('Reward proxy server is up %s', self.reward_proxy.before)
        super(DualProxyServer, self).recv_ClientInit(block)
        self.logfile_dir = self.log_manager.logfile_dir

    def close(self):
        # End the VNC connection first.
        super(DualProxyServer, self).close()

        # Then shut down the reward proxy subprocess, if it was started.
        if self.reward_proxy:
            self.reward_proxy.terminate()

        # Upload recorded logs to S3.
        # HACK: shelling out to a script; replace with a proper uploader.
        logger.info('log manager = %s', self.log_manager)
        if self.log_manager:
            os.system('/app/universe/bin/upload_directory.sh demonstrator_%(recorder_id)s %(directory)s %(bucket)s' %
                      dict(recorder_id=self.factory.recorder_id, directory=self.logfile_dir,
                           bucket=self.factory.bucket)
                      )
| {
"content_hash": "c1d2f8657520f6aea7de7a4f0f33fda2",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 117,
"avg_line_length": 35.67741935483871,
"alnum_prop": 0.6392405063291139,
"repo_name": "rht/universe",
"id": "088673a7e5838ff154d1eed5bc58a2c1c5d47842",
"size": "2271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "universe/vncdriver/dual_proxy_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2718"
},
{
"name": "Python",
"bytes": "536345"
}
],
"symlink_target": ""
} |
import re
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.eos.providers.providers import register_provider
from ansible.module_utils.network.eos.providers.providers import CliProvider
from ansible.module_utils.network.eos.providers.cli.config.bgp.neighbors import Neighbors
from ansible.module_utils.network.eos.providers.cli.config.bgp.address_family import AddressFamily
# Routing protocols that may be redistributed into BGP on EOS.
REDISTRIBUTE_PROTOCOLS = frozenset(['ospf', 'ospf3', 'rip', 'isis', 'static', 'connected'])
@register_provider('eos', 'eos_bgp')
class Provider(CliProvider):
    """Render EOS 'router bgp' process configuration from module params."""

    def render(self, config=None):
        """Build the command list for the requested operation.

        config: existing device configuration text, used to suppress
        commands that are already present and to find stale entries.
        """
        cmds = []

        # Detect an already-configured BGP AS on the device, if any.
        existing_as = None
        if config:
            bgp_match = re.search(r'router bgp (\d+)', config, re.M)
            if bgp_match:
                existing_as = bgp_match.group(1)

        operation = self.params['operation']

        context = None
        if self.params['config']:
            context = 'router bgp %s' % self.get_value('config.bgp_as')

        if operation == 'delete':
            # Remove whichever BGP process we know about and stop.
            if existing_as:
                cmds.append('no router bgp %s' % existing_as)
            elif context:
                cmds.append('no %s' % context)
            return cmds

        self._validate_input(config)

        if operation == 'replace':
            # A different AS number means the whole process is rebuilt.
            if existing_as and int(existing_as) != self.get_value('config.bgp_as'):
                cmds.append('no router bgp %s' % existing_as)
                config = None
        elif operation == 'override':
            if existing_as:
                cmds.append('no router bgp %s' % existing_as)
            config = None

        # Dispatch each configured key to its _render_<key> method.
        body = []
        for key, value in iteritems(self.get_value('config')):
            if value is None:
                continue
            renderer = getattr(self, '_render_%s' % key, None)
            if renderer is None:
                continue
            rendered = renderer(config)
            if rendered:
                body.extend(to_list(rendered))

        if context and body:
            cmds.append(context)
            cmds.extend(body)
            cmds.append('exit')
        return cmds

    def _render_router_id(self, config=None):
        """Emit 'router-id' unless it is already configured."""
        cmd = 'router-id %s' % self.get_value('config.router_id')
        if config and cmd in config:
            return None
        return cmd

    def _render_log_neighbor_changes(self, config=None):
        """Emit (or negate) 'bgp log-neighbor-changes' as requested."""
        cmd = 'bgp log-neighbor-changes'
        wanted = self.get_value('config.log_neighbor_changes')

        if wanted is True and (not config or cmd not in config):
            return cmd
        if wanted is False and config and cmd in config:
            return 'no %s' % cmd

    def _render_networks(self, config=None):
        """Emit 'network' statements; on replace, remove stale ones."""
        cmds = []
        keep = []

        for entry in self.get_value('config.networks'):
            network = entry['prefix']
            if entry['masklen']:
                network = '%s/%s' % (entry['prefix'], entry['masklen'])
            keep.append(network)

            cmd = 'network %s' % network
            if entry['route_map']:
                cmd += ' route-map %s' % entry['route_map']
            if not config or cmd not in config:
                cmds.append(cmd)

        if self.params['operation'] == 'replace' and config:
            stale = set(re.findall(r'network (\S+)', config, re.M)).difference(keep)
            for entry in stale:
                cmds.append('no network %s' % entry)
        return cmds

    def _render_redistribute(self, config=None):
        """Emit 'redistribute' statements; on replace, remove stale ones."""
        cmds = []
        keep = []

        for entry in self.get_value('config.redistribute'):
            cmd = 'redistribute %s' % entry['protocol']
            if entry['route_map']:
                cmd += ' route-map %s' % entry['route_map']
            if not config or cmd not in config:
                cmds.append(cmd)
            keep.append(entry['protocol'])

        if self.params['operation'] == 'replace' and config:
            found = [' '.join(m).strip()
                     for m in re.findall(r'redistribute (\S+)(?:\s*)(\d*)', config, re.M)]
            for entry in set(found).difference(keep):
                cmds.append('no redistribute %s' % entry)
        return cmds

    def _render_neighbors(self, config):
        """Generate bgp neighbor configuration."""
        return Neighbors(self.params).render(config)

    def _render_address_family(self, config):
        """Generate address-family configuration."""
        return AddressFamily(self.params).render(config)

    def _validate_input(self, config):
        """Reject root-level networks on replace when address families exist."""
        address_family = self.get_value('config.address_family')
        root_networks = self.get_value('config.networks')

        if self.params['operation'] != 'replace' or not root_networks:
            return

        for item in (address_family or []):
            if item['networks']:
                raise ValueError('operation is replace but provided both root level networks and networks under %s address family'
                                 % item['afi'])

        if config and re.search(r'address-family (?:.*)', config):
            raise ValueError('operation is replace and device has one or more address family activated but root level network(s) provided')
| {
"content_hash": "3522f3ba929d92a21e42767c58aca01a",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 143,
"avg_line_length": 36.75471698113208,
"alnum_prop": 0.5592060232717317,
"repo_name": "thaim/ansible",
"id": "a2f40affaea26972e6ec18faf06240c49b322f57",
"size": "5977",
"binary": false,
"copies": "24",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/module_utils/network/eos/providers/cli/config/bgp/process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
import sys
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<class 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> next(fl)
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> next(fl)
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* ``buffered`` enables buffering when required.
* ``c_index`` causes a C-order index to be tracked.
* ``f_index`` causes a Fortran-order index to be tracked.
* ``multi_index`` causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* ``common_dtype`` causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* ``copy_if_overlap`` causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* ``delay_bufalloc`` delays allocation of the buffers until
a reset() call is made. Allows ``allocate`` operands to
be initialized before their values are copied into the buffers.
* ``external_loop`` causes the ``values`` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* ``grow_inner`` allows the ``value`` array sizes to be made
larger than the buffer size when both ``buffered`` and
``external_loop`` is used.
* ``ranged`` allows the iterator to be restricted to a sub-range
of the iterindex values.
* ``refs_ok`` enables iteration of reference types, such as
object arrays.
* ``reduce_ok`` enables iteration of ``readwrite`` operands
which are broadcasted, also known as reduction operands.
* ``zerosize_ok`` allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
``readonly``, ``readwrite``, or ``writeonly`` must be specified.
* ``readonly`` indicates the operand will only be read from.
* ``readwrite`` indicates the operand will be read from and written to.
* ``writeonly`` indicates the operand will only be written to.
* ``no_broadcast`` prevents the operand from being broadcasted.
* ``contig`` forces the operand data to be contiguous.
* ``aligned`` forces the operand data to be aligned.
* ``nbo`` forces the operand data to be in native byte order.
* ``copy`` allows a temporary read-only copy if required.
* ``updateifcopy`` allows a temporary read-write copy if required.
* ``allocate`` causes the array to be allocated if it is None
in the ``op`` parameter.
* ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
* ``arraymask`` indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* ``writemasked`` indicates that only elements where the chosen
``arraymask`` operand is True will be written to.
* ``overlap_assume_elementwise`` can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when ``copy_if_overlap`` is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of ``allocate`` operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operands.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as `newaxis`.
itershape : tuple of ints, optional
The desired shape of the iterator. This allows ``allocate`` operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the ``delay_bufalloc`` flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the ``c_index`` or
the ``f_index`` flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the ``multi_index`` flag,
and the property `multi_index` can be used to retrieve it.
index
When the ``c_index`` or ``f_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_index`` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern. Valid only before the iterator
is closed.
multi_index
When the ``multi_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_multi_index`` is False.
ndim : int
The dimensions of the iterator.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over. Valid only before the iterator is
closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of ``operands`` at current iteration. Normally, this is a
tuple of array scalars, but if the flag ``external_loop`` is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the coordinates or index of an iterator, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol:
>>> def iter_add_py(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... for (a, b, c) in it:
... addop(a, b, out=c)
... return it.operands[2]
Here is the same function, but following the C-style pattern:
>>> def iter_add(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... while not it.finished:
... addop(it[0], it[1], out=it[2])
... it.iternext()
... return it.operands[2]
Here is an example outer product function:
>>> def outer_it(x, y, out=None):
... mulop = np.multiply
... it = np.nditer([x, y, out], ['external_loop'],
... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
... [-1] * x.ndim + list(range(y.ndim)),
... None])
... with it:
... for (a, b, c) in it:
... mulop(a, b, out=c)
... return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc:
>>> def luf(lamdaexpr, *args, **kwargs):
... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
... nargs = len(args)
... op = (kwargs.get('out',None),) + args
... it = np.nditer(op, ['buffered','external_loop'],
... [['writeonly','allocate','no_broadcast']] +
... [['readonly','nbo','aligned']]*nargs,
... order=kwargs.get('order','K'),
... casting=kwargs.get('casting','safe'),
... buffersize=kwargs.get('buffersize',0))
... while not it.finished:
... it[0] = lamdaexpr(*it[1:])
... it.iternext()
... return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
If operand flags `"writeonly"` or `"readwrite"` are used the
operands may be views into the original data with the
`WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
context manager or the `nditer.close` method must be called before
using the result. The temporary data will be written back to the
original data when the `__exit__` function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with np.nditer(a, [],
... [['writeonly', 'updateifcopy']],
... casting='unsafe',
... op_dtypes=[np.dtype('f4')]) as i:
... x = i.operands[0]
... x[:] = [-1, -2, -3]
... # a still unchanged here
>>> a, x
(array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
It is important to note that once the iterator is exited, dangling
references (like `x` in the example) may or may not share data with
the original data `a`. If writeback semantics were active, i.e. if
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator
will sever the connection between `x` and `a`, writing to `x` will
no longer write to `a`. If writeback semantics are not active, then
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
Context management and the `close` method appeared in version 1.15.0.
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> next(it)
(array(0), array(1))
>>> it2 = it.copy()
>>> next(it2)
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('operands',
"""
operands[`Slice`]
The array(s) to be iterated over. Valid only before the iterator is closed.
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
without returning the result. Used in the C-style do-while pattern. For
an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
add_newdoc('numpy.core', 'nested_iters',
"""
Create nditers for use in nested loops
Create a tuple of `nditer` objects which iterate in nested loops over
different axes of the op argument. The first iterator is used in the
outermost loop, the last in the innermost loop. Advancing one will change
the subsequent iterators to point at its new element.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
axes : list of list of int
Each item is used as an "op_axes" argument to an nditer
flags, op_flags, op_dtypes, order, casting, buffersize (optional)
See `nditer` parameters of the same name
Returns
-------
iters : tuple of nditer
An nditer for each item in `axes`, outermost first
See Also
--------
nditer
Examples
--------
Basic usage. Note how y is the "flattened" version of
[a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified
the first iter's axes as [1]
>>> a = np.arange(12).reshape(2, 3, 2)
>>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
>>> for x in i:
... print(i.multi_index)
... for y in j:
... print('', j.multi_index, y)
(0,)
(0, 0) 0
(0, 1) 1
(1, 0) 6
(1, 1) 7
(1,)
(0, 0) 2
(0, 1) 3
(1, 0) 8
(1, 1) 9
(2,)
(0, 0) 4
(0, 1) 5
(1, 0) 10
(1, 1) 11
""")
add_newdoc('numpy.core', 'nditer', ('close',
"""
close()
Resolve all writeback semantics in writeable operands.
.. versionadded:: 1.15.0
See Also
--------
:ref:`nditer-context-manager`
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[5., 6., 7.],
[6., 7., 8.],
[7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> next(row), next(col)
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for `A`, see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
# --- Docstrings for C-implemented array constructors -------------------------
# These calls attach documentation to objects implemented in the C extension
# module `numpy.core.multiarray`; only the strings below are Python-side.

# np.empty: allocate without initializing (contents are arbitrary memory).
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g, `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #uninitialized
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #uninitialized
""")
# np.core.multiarray.scalar: internal constructor used by pickle support.
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
# np.zeros: allocate and zero-fill.
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
# Internal: replace the type-code -> array-type lookup dictionary.
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
# np.fromstring: text (or deprecated binary) parsing into a 1-D array.
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
The string separating numbers in the data; extra whitespace between
elements is also ignored.
.. deprecated:: 1.14
Passing ``sep=''``, the default, is deprecated since it will
trigger the deprecated binary mode of this function. This mode
interprets `string` as binary bytes, rather than ASCII text with
decimal numbers, an operation which is better spelt
``frombuffer(string, dtype, count)``. If `string` contains unicode
text, the binary mode of `fromstring` will first encode it into
bytes using either utf-8 (python 3) or the default encoding
(python 2), neither of which produce sane results.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
""")
# Docstring for the element-wise string-array comparison primitive.
add_newdoc('numpy.core.multiarray', 'compare_chararrays',
"""
compare_chararrays(a, b, cmp_op, rstrip)
Performs element-wise comparison of two string arrays using the
comparison operator specified by `cmp_op`.
Parameters
----------
a, b : array_like
Arrays to be compared.
cmp_op : {"<", "<=", "==", ">=", ">", "!="}
Type of comparison.
rstrip : Boolean
If True, the spaces at the end of Strings are removed before the comparison.
Returns
-------
out : ndarray
The output array of type Boolean with the same shape as a and b.
Raises
------
ValueError
If `cmp_op` is not valid.
TypeError
If at least one of `a` or `b` is a non-string array
Examples
--------
>>> a = np.array(["a", "b", "cde"])
>>> b = np.array(["a", "a", "dec"])
>>> np.compare_chararrays(a, b, ">", True)
array([False, True, False])
""")
# np.fromiter: build a 1-D array from any iterable.
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
""")
# np.fromfile: read binary or simply-formatted text data from a file.
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='', offset=0)
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str or Path
Open file object or filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
offset : int
The offset (in bytes) from the file's current position. Defaults to 0.
Only permitted for binary files.
.. versionadded:: 1.17.0
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import tempfile
>>> fname = tempfile.mkstemp()[1]
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
""")
# np.frombuffer: zero-copy interpretation of a buffer-protocol object.
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = b'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
# One-line signature docs for two internal C routines.
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
# np.arange: evenly spaced values over a half-open interval.
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use `numpy.linspace` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a position argument,
`start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
# Internal/pickling helpers and the array pretty-print hook.
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
# np.set_numeric_ops: deprecated hook for replacing array operators.
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
.. deprecated:: 1.16
For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
For ndarray subclasses, define the ``__array_ufunc__`` method and
override the relevant ufunc.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
# np.promote_types: smallest safe common dtype of two dtypes.
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric, but rarely associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, promote_types function now returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
An example of a non-associative case:
>>> p = np.promote_types
>>> p('S', p('i1', 'u1'))
dtype('S6')
>>> p(p('S', 'i1'), 'u1')
dtype('S4')
""")
# Python 2 only: ``newbuffer``/``getbuffer`` were removed from the C module
# on Python 3, so their docstrings must be registered conditionally.
# BUG FIX: the two ``add_newdoc`` calls below were at top level, leaving the
# ``if`` with an empty suite (a SyntaxError) and, had it parsed, registering
# Python-2-only docs unconditionally. Only the statement-opening lines are
# indented into the suite; the continuation lines of each string literal are
# left untouched so the docstring bytes are unchanged.
if sys.version_info.major < 3:
    add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
    add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
# Docstring for the C core of einsum (np.einsum's Python wrapper adds the
# `optimize` machinery on top; this text deliberately omits it).
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe')
*This documentation shadows that of the native python implementation of the `einsum` function,
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout as the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
# Class-level docstring for the ndarray type itself (low-level constructor).
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples; TODO).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[0.0e+000, 0.0e+000], # random
[ nan, 2.5e-323]])
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
# Attribute docstrings: the 3-arg (obj, (attr, doc)) form attaches docs to
# individual attributes of the C-level ndarray type.
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
.. autoattribute:: numpy.core._internal._ctypes.data
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.shape
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.strides
:noindex:
.. automethod:: numpy.core._internal._ctypes.data_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.shape_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.strides_as
:noindex:
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``as_parameter`` attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
# ndarray.flags: memory-layout flag object attribute.
# Fix: the WRITEBACKIFCOPY sentence was garbled ("must be called before
# deallocating to the base array will be updated ..."); rephrased to match
# the consistent wording used in the ndarray.setflags docstring in this file.
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
WRITEBACKIFCOPY (X)
This array is a copy of some other array. The C-API function
PyArray_ResolveWritebackIfCopy must be called before deallocating
this array; the base array will then be updated with the contents
of this array.
UPDATEIFCOPY (U)
(Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
changed by the user, via direct assignment to the attribute or dictionary
entry, or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- WRITEBACKIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
Fortran-style contiguous arrays is true.
"""))
# ndarray.flat: flatiter-based 1-D iteration/assignment attribute.
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<class 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
# ndarray.nbytes: total element storage in bytes.
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
# ndarray.ndim: rank (number of dimensions) attribute.
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
# ndarray.real: view of the real component of each element.
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
# ndarray.shape: dimension tuple; assignable for in-place reshape.
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
The shape property is usually used to get the current shape of an array,
but may also be used to reshape the array in-place by assigning a tuple of
array dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the size of
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
>>> np.zeros((4,2))[::2].shape = (-1,)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: incompatible shape for a non-contiguous array
See Also
--------
numpy.reshape : similar function
ndarray.reshape : similar method
"""))
# ndarray.size: total element count attribute.
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equal to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Notes
-----
`a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
calculations that may overflow a fixed size integer type.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
# ndarray.strides: per-dimension byte-step tuple attribute.
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
# ndarray.T: transpose attribute (same as self.transpose()).
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
The transposed array.
Same as ``self.transpose()``.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
See Also
--------
transpose
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
# ndarray.__array__: protocol hook returning self or a dtype-converted copy.
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
# ndarray.__array_prepare__: ufunc output-preparation hook.
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
# ndarray.__array_wrap__: ufunc output-wrapping hook.
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
# ndarray.__copy__: copy.copy() support.
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__()
Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
Equivalent to ``a.copy(order='K')``.
"""))
# ndarray.__deepcopy__: copy.deepcopy() support.
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__(memo, /) -> Deep copy of array.
Used if :func:`copy.deepcopy` is called on an array.
"""))
# ndarray.__reduce__ / __setstate__: pickle protocol support.
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(state, /)
For unpickling.
The `state` argument must be a sequence that contains the following
elements:
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
# Reduction/query methods that defer to the numpy.* free functions.
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind=None, order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
# ndarray.astype: dtype-converting copy method.
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
.. versionchanged:: 1.17.0
Casting between a simple data type and a structured one is possible only
for "unsafe" casting. Casting to multiple fields is allowed, but
casting from multiple fields is not.
.. versionchanged:: 1.9.0
Casting from numeric to string types in 'safe' casting mode requires
that the string dtype length is long enough to store the max
integer/float value converted.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
"""))
# ndarray.byteswap: endianness-swapping method (optionally in-place).
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements
Toggle between low-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Arrays of byte-strings are not swapped. The real and imaginary
parts of a complex number are swapped individually.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> list(map(hex, A))
['0x1', '0x100', '0x2233']
>>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> list(map(hex, A))
['0x100', '0x1', '0x3322']
Arrays of byte-strings are not swapped
>>> A = np.array([b'ceg', b'fac'])
>>> A.byteswap()
array([b'ceg', b'fac'], dtype='|S3')
``A.newbyteorder().byteswap()`` produces an array with the same values
but different representation in memory
>>> A = np.array([1, 2, 3])
>>> A.view(np.uint8)
array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0], dtype=uint8)
>>> A.newbyteorder().byteswap(inplace=True)
array([1, 2, 3])
>>> A.view(np.uint8)
array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
0, 3], dtype=uint8)
"""))
# Selection/clipping/conjugation methods that defer to numpy.* free functions.
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None, **kwargs)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
# ndarray.copy: memory-layout-aware copy method.
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
# Cumulative and diagonal methods that defer to numpy.* free functions.
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
# ndarray.dot: matrix-product method.
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[2., 2.],
[2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[8., 8.],
[8., 8.]])
"""))
# ndarray.dump / dumps: pickle serialization helpers.
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str or Path
A string naming the dump file.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
"""))
# ndarray.fill: in-place scalar fill method.
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([1., 1.])
"""))
# ndarray.flatten: 1-D copy method (contrast with the `flat` iterator).
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
# ndarray.getfield: typed-view-at-offset method.
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[1.+1.j, 0.+0.j],
[0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[1., 0.],
[0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[1., 0.],
[0., 4.]])
"""))
# ndarray.item: element-to-Python-scalar extraction method.
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.item(3)
1
>>> x.item(7)
0
>>> x.item((0, 1))
2
>>> x.item((2, 2))
1
"""))
# ndarray.itemset: fast single-element assignment method (inverse of item).
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and define the last argument
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[2, 2, 6],
[1, 0, 6],
[1, 0, 9]])
"""))
# Reduction methods max/mean/min that defer to numpy.amax/mean/amin.
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
# ndarray.newbyteorder: byte-order-view method.
# Fix: the "Equivalent to" snippet misspelled the dtype method as
# ``newbytorder``; the correct API name is ``dtype.newbyteorder`` (as the
# method's own title line shows).
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
"""))
# Query/assignment/reshaping methods that defer to numpy.* free functions.
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None, keepdims=False)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
"""))
# ndarray.resize: in-place shape/size change (distinct from numpy.resize).
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that references or is referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
# Rounding/search methods that defer to numpy.* free functions.
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
# ndarray.setfield: write-into-typed-field method (inverse of getfield).
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]], dtype=int32)
>>> x
array([[1.0e+000, 1.5e-323, 1.5e-323],
[1.5e-323, 1.0e+000, 1.5e-323],
[1.5e-323, 1.5e-323, 1.0e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""))
# ndarray.setflags: user-settable flag mutation method.
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
to True. The flag WRITEABLE can only be set to True if the array owns its
own memory, or the ultimate owner of the memory exposes a writeable buffer
interface, or is a string. (The exception for string is made so that
unpickling can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 7 Boolean flags
in use, only four of which can be changed by the user:
WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
called, the base array will be updated with the contents of this array.
All flags can be accessed using the single (upper case) letter as well
as the full name.
Examples
--------
>>> y = np.array([[3, 1, 7],
... [2, 0, 0],
... [8, 5, 9]])
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set WRITEBACKIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind=None, order=None)
Sort an array in-place. Refer to `numpy.sort` for full documentation.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with datatype. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See `numpy.sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([(b'c', 1), (b'a', 2)],
dtype=[('x', 'S1'), ('y', '<i8')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str or Path
An open file object, or a string containing a filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with file objects supporting compression (e.g., GzipFile)
or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible builtin Python type, via
the `~numpy.ndarray.item` function.
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
not be a list at all, but a simple Python scalar.
Parameters
----------
none
Returns
-------
y : object, or list of object, or list of list of object, or ...
The possibly nested list of array elements.
Notes
-----
The array may be recreated via ``a = np.array(a.tolist())``, although this
may sometimes lose precision.
Examples
--------
For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``:
>>> a = np.array([1, 2])
>>> list(a)
[1, 2]
>>> a.tolist()
[1, 2]
However, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
The base case for this recursion is a 0D array:
>>> a = np.array(1)
>>> list(a)
Traceback (most recent call last):
...
TypeError: iteration over a 0-d array
>>> a.tolist()
1
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in either 'C' or 'Fortran',
or 'Any' order (the default is 'C'-order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
>>> x.tobytes()
b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array this has no effect, as a transposed vector is simply the
same vector. To convert a 1-D array into a 2D column vector, an additional
dimension must be added. `np.atleast_2d(a).T` achieves this, as does
`a[:, np.newaxis]`.
For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
ndarray.reshape : Give a new shape to an array without changing its data.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> x
array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
...
ValueError: To change to a dtype of a different size, the array must be C-contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'], dtype='<U5')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[8192, 521, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring raise a RuntimeError
If this routine does not know how to add a docstring to the object
raise a TypeError
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
"""
format_float_OSprintf_g(val, precision)
Print a floating point scalar using the system's printf function,
equivalent to:
printf("%.*g", precision, val);
for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
method is designed to help cross-validate the format_float_* methods.
Parameters
----------
val : python float or numpy floating scalar
Value to format.
precision : non-negative integer, optional
Precision given to printf.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
format_float_positional
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use `info`. For
example, ``np.info(np.sin)``. Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
Calling ufuncs:
===============
op(*x[, out], where=True, **kwargs)
Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
*x : array_like
Input arrays.
out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
number of outputs; use `None` for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
provided, it will be returned. If not, `r` will be allocated and
may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
add_newdoc('numpy.core', 'ufunc', ('signature',
"""
Definition of the core elements a generalized ufunc operates on.
The signature determines how the dimensions of each input/output array
are split into core and loop dimensions:
1. Each dimension in the signature is matched to a dimension of the
corresponding passed-in array, starting from the end of the shape tuple.
2. Core dimensions assigned to the same label in the signature must have
exactly matching sizes, no broadcasting is performed.
3. The core dimensions are removed from all inputs and the remaining
dimensions are broadcast together, defining the loop dimensions.
Notes
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
For ufuncs that operate on scalars, the signature is `None`, which is
equivalent to '()' for every argument.
Examples
--------
>>> np.core.umath_tests.matrix_multiply.signature
'(m,n),(n,p)->(m,p)'
>>> np.linalg._umath_linalg.det.signature
'(m,m)->()'
>>> np.add.signature is None
True # equivalent to '(),()->()'
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
initial : scalar, optional
The value with which to start the reduction.
If the ufunc has no identity or the dtype is object, this defaults
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
.. versionadded:: 1.15.0
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `a`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one has to pass in also ``initial``.
.. versionadded:: 1.17.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
You can use the ``initial`` keyword argument to initialize the reduction
with a different value, and ``where`` to select specific elements to include:
>>> np.add.reduce([10], initial=5)
15
>>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
>>> a = np.array([10., np.nan, 10])
>>> np.add.reduce(a, where=~np.isnan(a))
20.0
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
>>> np.minimum.reduce([], initial=np.inf)
inf
>>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
array([ 1., 10.])
>>> np.minimum.reduce([])
Traceback (most recent call last):
...
ValueError: zero-size array to reduction operation minimum which has no identity
"""))
# Attach the docstring for np.ufunc.accumulate. Fixed a duplicated word in the
# `dtype` parameter description ("or the the data-type" -> "or the data-type").
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[1., 0.],
[0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[1., 0.],
[1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[1., 0.],
[1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[1., 1.],
[0., 1.]])
"""))
# Attach the docstring for np.ufunc.reduceat (local reduce over index-pair slices).
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[12., 15., 18., 21.],
[12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[2184., 15.]])
"""))
# Attach the docstring for np.ufunc.outer (apply the ufunc to all input pairs).
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty(len(A),len(B))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
# Attach the docstring for np.ufunc.at (unbuffered in-place indexed operation).
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
``a[indices] += b``, except that results are accumulated for elements that
are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
# Attach the class docstring for np.dtype (data type object constructor).
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
dtype([('f1', '<u8'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', 'S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
dtype([('hello', '<i8', (3,)), ('world', 'V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', 'S1'), ('age', 'u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', 'S25'), ('age', 'u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
# Attach the docstring for the dtype.alignment attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
Examples
--------
>>> x = np.dtype('i4')
>>> x.alignment
4
>>> x = np.dtype(float)
>>> x.alignment
8
"""))
# Attach the docstring for the dtype.byteorder attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
# Attach the docstring for the dtype.char attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types.
Examples
--------
>>> x = np.dtype(float)
>>> x.char
'd'
"""))
# Attach the docstring for the dtype.descr attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
`__array_interface__` description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
Warning: This attribute exists specifically for `__array_interface__`,
and is not a datatype description compatible with `np.dtype`.
Examples
--------
>>> x = np.dtype(float)
>>> x.descr
[('', '<f8')]
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.descr
[('name', '<U16'), ('grades', '<f8', (2,))]
"""))
# Attach the docstring for the dtype.fields attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
Offset is limited to C int, which is signed and usually 32 bits.
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
# Attach the docstring for the dtype.flags attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
The following example demonstrates that operations on this particular
dtype requires Python C-API.
Examples
--------
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.flags
16
>>> np.core.multiarray.NEEDS_PYAPI
16
"""))
# Attach the docstring for the dtype.hasobject attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
# Attach the docstring for the dtype.isbuiltin attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
# Attach the docstring for the dtype.isnative attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
# Attach the docstring for the dtype.isalignedstruct attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
# Attach the docstring for the dtype.itemsize attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
Examples
--------
>>> arr = np.array([[1, 2], [3, 4]])
>>> arr.dtype
dtype('int64')
>>> arr.itemsize
8
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.itemsize
80
"""))
# Attach the docstring for the dtype.kind attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
Examples
--------
>>> dt = np.dtype('i4')
>>> dt.kind
'i'
>>> dt = np.dtype('f8')
>>> dt.kind
'f'
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.kind
'V'
"""))
# Attach the docstring for the dtype.name attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
Examples
--------
>>> x = np.dtype(float)
>>> x.name
'float64'
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.name
'void640'
"""))
# Attach the docstring for the dtype.names attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
# Attach the docstring for the dtype.num attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
Examples
--------
>>> dt = np.dtype(str)
>>> dt.num
19
>>> dt = np.dtype(float)
>>> dt.num
12
"""))
# Attach the docstring for the dtype.shape attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
Examples
--------
>>> dt = np.dtype(('i4', 4))
>>> dt.shape
(4,)
>>> dt = np.dtype(('i4', (2, 3)))
>>> dt.shape
(2, 3)
"""))
# Attach the docstring for the dtype.ndim attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
"""
Number of dimensions of the sub-array if this data type describes a
sub-array, and ``0`` otherwise.
.. versionadded:: 1.13.0
Examples
--------
>>> x = np.dtype(float)
>>> x.ndim
0
>>> x = np.dtype((float, 8))
>>> x.ndim
1
>>> x = np.dtype(('i4', (3, 4)))
>>> x.ndim
2
"""))
# Attach the docstring for the dtype.str attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
# Attach the docstring for the dtype.subdtype attribute. Examples now use the
# `np.` alias (was `numpy.`), matching every other example in this file and the
# doctest namespace, where only `np` is defined.
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
See Also
--------
dtype.base
Examples
--------
>>> x = np.dtype('8f')
>>> x.subdtype
(dtype('float32'), (8,))
>>> x = np.dtype('i2')
>>> x.subdtype
>>>
"""))
# Attach the docstring for the dtype.base attribute. Examples now use the
# `np.` alias (was `numpy.`), matching every other example in this file and the
# doctest namespace, where only `np` is defined.
add_newdoc('numpy.core.multiarray', 'dtype', ('base',
"""
Returns dtype for the base element of the subarrays,
regardless of their dimension or shape.
See Also
--------
dtype.subdtype
Examples
--------
>>> x = np.dtype('8f')
>>> x.base
dtype('float32')
>>> x = np.dtype('i2')
>>> x.base
dtype('int16')
"""))
# Attach the docstring for the dtype.type attribute.
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
# Attach the docstring for the dtype.newbyteorder method.
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
# Attach the class docstring for np.busdaycalendar.
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional "holiday" dates that always will be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False])
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
# One-line docstrings for the two read-only busdaycalendar attributes.
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
# Attach the docstring for the internal normalize_axis_index helper.
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
Normalizes an axis index, `axis`, such that is a valid positive index into
the shape of array with `ndim` dimensions. Raises an AxisError with an
appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative
ndim : int
The number of dimensions of the array that `axis` should be normalized
against
msg_prefix : str
A prefix to put before the message, typically the name of the argument
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, when `-ndim <= axis < ndim` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
Traceback (most recent call last):
...
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
# Attach the docstring for np.datetime_data.
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
Get information about the step size of a date or time type.
The returned tuple can be passed as the second argument of `numpy.datetime64` and
`numpy.timedelta64`.
Parameters
----------
dtype : dtype
The dtype object, which must be a `datetime64` or `timedelta64` type.
Returns
-------
unit : str
The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
is based.
count : int
The number of base units in a step.
Examples
--------
>>> dt_25s = np.dtype('timedelta64[25s]')
>>> np.datetime_data(dt_25s)
('s', 25)
>>> np.array(10, dt_25s).astype('timedelta64[s]')
array(250, dtype='timedelta64[s]')
The result can be used to construct a datetime that uses the same units
as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
numpy.datetime64('2010-01-01T00:00:00','25s')
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
# Attach the class docstring for np.generic, the scalar-type base class.
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
# Stub docstring for the unimplemented ndarray-style T attribute on np.generic.
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
# Stub docstring for the unimplemented ndarray-style base attribute on
# np.generic. Fixed a dropped word: "so as to a uniform API" now reads
# "so as to provide a uniform API", matching the sibling stubs.
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
# One-line docstrings for the ndarray-like attributes exposed on np.generic
# scalars (data, dtype, flags, flat, imag, itemsize, nbytes, ndim, real,
# shape, size, strides).
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
# Boilerplate stub docstrings for ndarray-style methods on np.generic
# (all through cumsum); each notes the method is a virtual, unimplemented
# placeholder kept only so scalars mirror the ndarray API.
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for scalar type abstract base classes in type hierarchy
#
##############################################################################

# (class name, docstring) pairs for the abstract scalar base classes.
_abstract_type_docs = (
    ('number', """
Abstract base class of all numeric scalar types.
"""),
    ('integer', """
Abstract base class of all integer scalar types.
"""),
    ('signedinteger', """
Abstract base class of all signed integer scalar types.
"""),
    ('unsignedinteger', """
Abstract base class of all unsigned integer scalar types.
"""),
    ('inexact', """
Abstract base class of all numeric scalar types with a (potentially)
inexact representation of the values in its range, such as
floating-point numbers.
"""),
    ('floating', """
Abstract base class of all floating-point scalar types.
"""),
    ('complexfloating', """
Abstract base class of all complex number scalar types that are made up of
floating-point numbers.
"""),
    ('flexible', """
Abstract base class of all scalar types without predefined length.
The actual size of these types depends on the specific `np.dtype`
instantiation.
"""),
    ('character', """
Abstract base class of all character string scalar types.
"""),
)

for _name, _doc in _abstract_type_docs:
    add_newdoc('numpy.core.numerictypes', _name, _doc)

##############################################################################
#
# Documentation for concrete scalar classes
#
##############################################################################
def numeric_type_aliases(aliases):
    """
    Filter (name, doc) pairs down to (type, name, doc) triples for the
    sized-type aliases that actually exist in `_numerictypes`.

    The set of aliases that exists varies between platforms (e.g.
    float96/float128), so missing names are silently skipped.
    """
    found = []
    for alias, doc in aliases:
        try:
            alias_type = getattr(_numerictypes, alias)
        except AttributeError:
            # Alias not available on this platform -- skip it.
            continue
        found.append((alias_type, alias, doc))
    return found
# All sized-type aliases that *may* exist, with their doc text; filtered
# at import time to the ones this platform actually provides.
possible_aliases = numeric_type_aliases([
    ('int8', '8-bit signed integer (-128 to 127)'),
    ('int16', '16-bit signed integer (-32768 to 32767)'),
    ('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
    ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
    ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
    ('uint8', '8-bit unsigned integer (0 to 255)'),
    ('uint16', '16-bit unsigned integer (0 to 65535)'),
    ('uint32', '32-bit unsigned integer (0 to 4294967295)'),
    ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
    ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
    ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
    ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
    ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
    ('float96', '96-bit extended-precision floating-point number type'),
    ('float128', '128-bit extended-precision floating-point number type'),
    ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
    ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
    ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
    ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
])
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
    """
    Register *doc* as the docstring of scalar type *obj*, appending the
    type's character code, its canonical name (when it differs from the
    class name), and any fixed or platform-dependent aliases.
    """
    o = getattr(_numerictypes, obj)
    character_code = dtype(o).char
    # Mention the canonical name only when `obj` is itself an alias.
    canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj)
    alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases)
    # Platform-dependent aliases from `possible_aliases` that resolve to
    # this exact type object.  (The generator's `doc` shadows the
    # parameter only inside the expression.)
    alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc)
                         for (alias_type, alias, doc) in possible_aliases if alias_type is o)
    docstring = """
{doc}
Character code: ``'{character_code}'``.
{canonical_name_doc}{alias_doc}
""".format(doc=doc.strip(), character_code=character_code,
           canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
    add_newdoc('numpy.core.numerictypes', obj, docstring)
# Concrete scalar types: boolean and the first C-compatible signed ints.
add_newdoc_for_scalar_type('bool_', ['bool8'],
"""
Boolean type (True or False), stored as a byte.
""")
add_newdoc_for_scalar_type('byte', [],
"""
Signed integer type, compatible with C ``char``.
""")
add_newdoc_for_scalar_type('short', [],
"""
Signed integer type, compatible with C ``short``.
""")
add_newdoc_for_scalar_type('intc', [],
"""
Signed integer type, compatible with C ``int``.
""")
# Fixed a typo in the user-visible docstring: "anc C" -> "and C".
add_newdoc_for_scalar_type('int_', [],
"""
Signed integer type, compatible with Python `int` and C ``long``.
""")
# Concrete scalar types: long long plus the C-compatible unsigned ints.
add_newdoc_for_scalar_type('longlong', [],
"""
Signed integer type, compatible with C ``long long``.
""")
add_newdoc_for_scalar_type('ubyte', [],
"""
Unsigned integer type, compatible with C ``unsigned char``.
""")
add_newdoc_for_scalar_type('ushort', [],
"""
Unsigned integer type, compatible with C ``unsigned short``.
""")
add_newdoc_for_scalar_type('uintc', [],
"""
Unsigned integer type, compatible with C ``unsigned int``.
""")
add_newdoc_for_scalar_type('uint', [],
"""
Unsigned integer type, compatible with C ``unsigned long``.
""")
# Fixed the user-visible description: ulonglong is an *unsigned* type,
# not "Signed" as the original text claimed.
add_newdoc_for_scalar_type('ulonglong', [],
"""
Unsigned integer type, compatible with C ``unsigned long long``.
""")
# Concrete scalar types: floating point, complex, and object.
add_newdoc_for_scalar_type('half', [],
"""
Half-precision floating-point number type.
""")
add_newdoc_for_scalar_type('single', [],
"""
Single-precision floating-point number type, compatible with C ``float``.
""")
add_newdoc_for_scalar_type('double', ['float_'],
"""
Double-precision floating-point number type, compatible with Python `float`
and C ``double``.
""")
add_newdoc_for_scalar_type('longdouble', ['longfloat'],
"""
Extended-precision floating-point number type, compatible with C
``long double`` but not necessarily with IEEE 754 quadruple-precision.
""")
add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
"""
Complex number type composed of two single-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
"""
Complex number type composed of two double-precision floating-point
numbers, compatible with Python `complex`.
""")
add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
"""
Complex number type composed of two extended-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('object_', [],
"""
Any Python object.
""")
# TODO: work out how to put this on the base class, np.floating
# Same docstring for every concrete float type, with the type name
# substituted into the signature and the doctest examples.
for float_name in ('half', 'single', 'double', 'longdouble'):
    add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
"""
{ftype}.as_integer_ratio() -> (int, int)
Return a pair of integers, whose ratio is exactly equal to the original
floating point number, and with a positive denominator.
Raise OverflowError on infinities and a ValueError on NaNs.
>>> np.{ftype}(10.0).as_integer_ratio()
(10, 1)
>>> np.{ftype}(0.0).as_integer_ratio()
(0, 1)
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))
| {
"content_hash": "1358212dd08a4d79cf1698716bf9d4b5",
"timestamp": "",
"source": "github",
"line_count": 6856,
"max_line_length": 109,
"avg_line_length": 29.510939323220537,
"alnum_prop": 0.5922738932520129,
"repo_name": "MSeifert04/numpy",
"id": "dbe3d226fdef4efeec37522e1502f03c9e6e590f",
"size": "202327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/core/_add_newdocs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9050105"
},
{
"name": "C++",
"bytes": "189464"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8251054"
},
{
"name": "Shell",
"bytes": "8345"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
} |
import sys
import os
import shutil
from tags import *
import glob
import subprocess
from engine import create_context
def find_bblayers():
    """
    Find and return a sanitized list of the layers found in BBLAYERS.

    Shells out to 'bitbake -e' and parses the BBLAYERS assignment from
    its output.  Exits with a message if BUILDDIR is not set (i.e.
    oe-init-build-env was not sourced) or BBLAYERS cannot be found.
    """
    # BUILDDIR is only checked for a friendly error message; the
    # original also built an unused conf/bblayers.conf path from it
    # (dead code, removed).
    if "BUILDDIR" not in os.environ:
        print("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
        sys.exit(1)
    bitbake_env_cmd = "bitbake -e"
    bitbake_env_lines = subprocess.Popen(bitbake_env_cmd, shell=True,
                                         stdout=subprocess.PIPE).stdout.read()
    if not bitbake_env_lines:
        print("Couldn't get '%s' output, exiting." % bitbake_env_cmd)
        sys.exit(1)
    bblayers = None
    for line in bitbake_env_lines.split('\n'):
        bblayers = get_line_val(line, "BBLAYERS")
        if bblayers:
            break
    if not bblayers:
        print("Couldn't find BBLAYERS in %s output, exiting." % bitbake_env_cmd)
        sys.exit(1)
    # Keep only the layer paths, dropping the variable name and any
    # assignment-operator tokens.
    layers = []
    for layer in bblayers.split():
        if layer == 'BBLAYERS' or '=' in layer:
            continue
        layers.append(layer)
    return layers
def get_line_val(line, key):
    """
    Extract the value from a VAR="val" line.

    Returns the unquoted value when *line* starts with 'key=',
    otherwise None.
    """
    if not line.startswith(key + "="):
        return None
    # Text between the first and second '=' (split semantics match the
    # original), with double quotes removed.
    return line.split('=')[1].replace('"', '')
def find_meta_layer():
    """
    Return the meta layer found in BBLAYERS, or None if there is none.
    """
    for candidate in find_bblayers():
        if candidate.endswith("meta"):
            return candidate
    return None
def find_bsp_layer(machine):
    """
    Return the BSP layer for *machine* from BBLAYERS.

    Exits the program with a message if no layer path ends with the
    machine name.
    """
    for candidate in find_bblayers():
        if candidate.endswith(machine):
            return candidate
    print("Unable to find the BSP layer for machine %s." % machine)
    print("Please make sure it is listed in bblayers.conf")
    sys.exit(1)
def gen_choices_str(choices):
    """
    Render *choices* as a numbered menu for display to the user, one
    tab-indented "<n>) <choice>" entry per line.
    """
    return "".join("\t%d) %s\n" % (num, entry)
                   for num, entry in enumerate(choices, 1))
def open_user_file(scripts_path, machine, userfile, mode):
    """
    Find one of the user files (user-config.cfg, user-patches.scc)
    associated with the machine (could be in files/,
    linux-yocto-custom/, etc).

    Returns the open file if found, None otherwise.  The caller is
    responsible for closing the returned file.
    """
    layer = find_bsp_layer(machine)
    linuxdir = os.path.join(layer, "recipes-kernel/linux")
    for entry in os.listdir(linuxdir):
        subdir = os.path.join(linuxdir, entry)
        if not os.path.isdir(subdir):
            continue
        try:
            return open(os.path.join(subdir, userfile), mode)
        except IOError:
            # Not in this subdir; keep looking.
            continue
    return None
def read_config_items(scripts_path, machine):
    """
    Return the list of config items (CONFIG_XXX) in a machine's
    user-defined config fragment [${machine}-user-config.cfg].

    Blank lines and '#' comment lines are skipped.
    """
    f = open_user_file(scripts_path, machine, machine + "-user-config.cfg", "r")
    # NOTE(review): open_user_file can return None, which would raise
    # AttributeError here -- presumably the fragment always exists.
    config_items = []
    for raw in f.readlines():
        entry = raw.strip()
        if entry and not entry.startswith("#"):
            config_items.append(entry)
    f.close()
    return config_items
def write_config_items(scripts_path, machine, config_items):
    """
    Replace the contents of a machine's user-defined config fragment
    [${machine}-user-config.cfg] with *config_items*, one per line,
    then notify the build system that the kernel recipe changed.
    """
    f = open_user_file(scripts_path, machine, machine + "-user-config.cfg", "w")
    for entry in config_items:
        f.write("%s\n" % entry)
    f.close()
    kernel_contents_changed(scripts_path, machine)
def yocto_kernel_config_list(scripts_path, machine):
    """
    Print the numbered list of config items (CONFIG_XXX) in a machine's
    user-defined config fragment [${machine}-user-config.cfg].
    """
    items = read_config_items(scripts_path, machine)
    print("The current set of machine-specific kernel config items for %s is:" % machine)
    print(gen_choices_str(items))
def yocto_kernel_config_rm(scripts_path, machine):
    """
    Display the list of config items (CONFIG_XXX) in a machine's
    user-defined config fragment [${machine}-user-config.cfg], prompt the
    user for one or more to remove, and remove them.
    """
    config_items = read_config_items(scripts_path, machine)

    print("Specify the kernel config items to remove:")
    input = raw_input(gen_choices_str(config_items))
    rm_choices = input.split()

    # Validate every choice up front, then remove from the highest index
    # down so earlier pops can't shift the positions of later ones.
    # (The original sorted the choices as *strings*, so with ten or more
    # items e.g. '9' ordered after '10' and the wrong entries could be
    # removed.)
    indices = []
    for choice in rm_choices:
        try:
            idx = int(choice) - 1
        except ValueError:
            print("Invalid choice (%s), exiting" % choice)
            sys.exit(1)
        if idx < 0 or idx >= len(config_items):
            print("Invalid choice (%d), exiting" % (idx + 1))
            sys.exit(1)
        indices.append(idx)

    removed = []
    for idx in sorted(indices, reverse=True):
        removed.append(config_items.pop(idx))

    write_config_items(scripts_path, machine, config_items)

    print("Removed items:")
    for r in removed:
        print("\t%s" % r)
def yocto_kernel_config_add(scripts_path, machine, config_items):
    """
    Add one or more config items (CONFIG_XXX) to a machine's
    user-defined config fragment [${machine}-user-config.cfg].

    Malformed items abort the program; duplicates (already present or
    repeated on the command line) are reported and skipped.
    """
    accepted = []
    duplicates = []
    current = read_config_items(scripts_path, machine)
    for item in config_items:
        # Every item must look like CONFIG...=y or CONFIG...=m.
        well_formed = item.startswith("CONFIG") and ("=y" in item or "=m" in item)
        if not well_formed:
            print("Invalid config item (%s), exiting" % item)
            sys.exit(1)
        if item in current or item in accepted:
            duplicates.append(item)
        else:
            accepted.append(item)
    if accepted:
        current.extend(accepted)
        write_config_items(scripts_path, machine, current)
        print("Added item%s:" % ("" if len(accepted) == 1 else "s"))
        for item in accepted:
            print("\t%s" % item)
    if duplicates:
        print("The following item%s already exist%s in the current configuration, ignoring %s:" %
              (("", "s", "it") if len(duplicates) == 1 else ("s", "", "them")))
        for item in duplicates:
            print("\t%s" % item)
def find_current_kernel(bsp_layer, machine):
    """
    Determine the kernel recipe and version currently used by the BSP.

    Parses conf/machine/<machine>.conf under *bsp_layer* for
    PREFERRED_PROVIDER_virtual/kernel and the matching
    PREFERRED_VERSION_<kernel> assignment.

    Returns "<kernel>_<version>" when both are set, "<kernel>" when only
    the provider is set, or None when neither is found.
    """
    machine_conf = os.path.join(bsp_layer, "conf/machine/" + machine + ".conf")
    preferred_kernel = preferred_kernel_version = preferred_version_varname = None
    # Context manager guarantees the conf file is closed; the original
    # opened it and never closed it (handle leak).
    with open(machine_conf, "r") as f:
        for line in f:
            if line.strip().startswith("PREFERRED_PROVIDER_virtual/kernel"):
                preferred_kernel = line.split()[-1].replace('"', '')
                preferred_version_varname = "PREFERRED_VERSION_" + preferred_kernel
            if preferred_version_varname and line.strip().startswith(preferred_version_varname):
                # Strip quotes and any trailing '%' wildcard.
                preferred_kernel_version = line.split()[-1].replace('"', '').replace('%', '')
    if preferred_kernel and preferred_kernel_version:
        return preferred_kernel + "_" + preferred_kernel_version
    elif preferred_kernel:
        return preferred_kernel
    return None
def find_filesdir(scripts_path, machine):
    """
    Find the name of the 'files' dir associated with the machine
    (could be in files/, linux-yocto-custom/, etc).

    Returns the path of the files dir if found, None otherwise.
    """
    layer = find_bsp_layer(machine)
    linuxdir = os.path.join(layer, "recipes-kernel/linux")
    for fileobj in os.listdir(linuxdir):
        fileobj_path = os.path.join(linuxdir, fileobj)
        if os.path.isdir(fileobj_path):
            # This could be files/ or linux-yocto-custom/; we have no way
            # of distinguishing, so we take the first (and normally only)
            # dir we find as the 'filesdir'.  The original kept scanning
            # and effectively returned the *last* dir, contradicting its
            # own comment -- return immediately instead.
            return fileobj_path
    return None
def read_patch_items(scripts_path, machine):
    """
    Return the list of patch names in a machine's user-defined patch
    list [${machine}-user-patches.scc].

    Only lines of the form 'patch <name>' count; blank lines and '#'
    comments are skipped.
    """
    f = open_user_file(scripts_path, machine, machine + "-user-patches.scc", "r")
    patch_items = []
    for raw in f.readlines():
        entry = raw.strip()
        if not entry or entry.startswith("#"):
            continue
        fields = entry.split()
        if fields[0] == "patch":
            patch_items.append(fields[1])
    f.close()
    return patch_items
def write_patch_items(scripts_path, machine, patch_items):
    """
    Replace the contents of a machine's user-defined patch list
    [${machine}-user-patches.scc] with 'patch <name>' lines for
    *patch_items*, then notify the build system of the change.
    """
    f = open_user_file(scripts_path, machine, machine + "-user-patches.scc", "w")
    for entry in patch_items:
        f.write("patch " + entry + "\n")
    f.close()
    kernel_contents_changed(scripts_path, machine)
def yocto_kernel_patch_list(scripts_path, machine):
    """
    Print the numbered list of patches in a machine's user-defined
    patch list [${machine}-user-patches.scc].
    """
    entries = read_patch_items(scripts_path, machine)
    print("The current set of machine-specific patches for %s is:" % machine)
    print(gen_choices_str(entries))
def yocto_kernel_patch_rm(scripts_path, machine):
    """
    Remove one or more patches from a machine's user-defined patch list
    [${machine}-user-patches.scc], deleting the corresponding patch
    files from the BSP's 'files' dir.
    """
    patches = read_patch_items(scripts_path, machine)

    print("Specify the patches to remove:")
    input = raw_input(gen_choices_str(patches))
    rm_choices = input.split()

    filesdir = find_filesdir(scripts_path, machine)
    if not filesdir:
        print("Couldn't rm patch(es) since we couldn't find a 'files' dir")
        sys.exit(1)

    # Validate every choice first, then remove from the highest index
    # down so earlier pops can't shift later indices.  (The original
    # sorted the choices as *strings*, so with ten or more patches e.g.
    # '9' ordered after '10' and the wrong entries could be removed.)
    indices = []
    for choice in rm_choices:
        try:
            idx = int(choice) - 1
        except ValueError:
            print("Invalid choice (%s), exiting" % choice)
            sys.exit(1)
        if idx < 0 or idx >= len(patches):
            print("Invalid choice (%d), exiting" % (idx + 1))
            sys.exit(1)
        indices.append(idx)

    removed = []
    for idx in sorted(indices, reverse=True):
        filesdir_patch = os.path.join(filesdir, patches[idx])
        if os.path.isfile(filesdir_patch):
            os.remove(filesdir_patch)
        removed.append(patches[idx])
        patches.pop(idx)

    write_patch_items(scripts_path, machine, patches)

    print("Removed patches:")
    for r in removed:
        print("\t%s" % r)
def yocto_kernel_patch_add(scripts_path, machine, patches):
    """
    Add one or more patches to a machine's user-defined patch list
    [${machine}-user-patches.scc].

    Each patch file is copied into the BSP's 'files' dir and its
    basename is appended to the patch list.
    """
    # Reject duplicates before touching anything.
    existing_patches = read_patch_items(scripts_path, machine)
    for patch in patches:
        if os.path.basename(patch) in existing_patches:
            print "Couldn't add patch (%s) since it's already been added" % os.path.basename(patch)
            sys.exit(1)

    filesdir = find_filesdir(scripts_path, machine)
    if not filesdir:
        # NOTE(review): 'patch' here is the leftover loop variable from the
        # duplicate check above — it names the *last* patch checked, and is
        # undefined if 'patches' is empty; confirm intent.
        print "Couldn't add patch (%s) since we couldn't find a 'files' dir to add it to" % os.path.basename(patch)
        sys.exit(1)

    new_patches = []
    for patch in patches:
        if not os.path.isfile(patch):
            print "Couldn't find patch (%s), exiting" % patch
            sys.exit(1)
        basename = os.path.basename(patch)
        # Copy the patch into the BSP's 'files' dir so the recipe can use it.
        filesdir_patch = os.path.join(filesdir, basename)
        shutil.copyfile(patch, filesdir_patch)
        new_patches.append(basename)

    cur_items = read_patch_items(scripts_path, machine)
    cur_items.extend(new_patches)
    write_patch_items(scripts_path, machine, cur_items)

    print "Added patches:"
    for n in new_patches:
        print "\t%s" % n
def inc_pr(line):
    """
    Add 1 to the PR value in the given bbappend PR line. For the PR
    lines in kernel .bbappends after modifications. Handles PRs of
    the form PR := "${PR}.1" as well as PR = "r0".

    Returns the rewritten line, always terminated with a newline.
    """
    idx = line.find("\"")
    # Strip the quotes to get the bare PR value, e.g. '${PR}.1' or 'r0'.
    pr_str = line[idx:].replace("\"", "")
    fields = pr_str.split('.')
    if len(fields) > 1:
        # PR := "${PR}.n" form: bump the numeric suffix after the dot.
        fields[1] = str(int(fields[1]) + 1)
        pr_str = "\"" + '.'.join(fields) + "\"\n"
    else:
        # PR = "rn" form: bump the number after the leading 'r'.
        pr_val = pr_str[1:]
        pr_str = "\"" + "r" + str(int(pr_val) + 1) + "\"\n"
    # Everything from the first quote onward is replaced wholesale; the
    # original's second find("\"") lookup was dead code and is removed.
    return line[:idx] + pr_str
def kernel_contents_changed(scripts_path, machine):
"""
Do what we need to do to notify the system that the kernel
recipe's contents have changed.
"""
layer = find_bsp_layer(machine)
kernel = find_current_kernel(layer, machine)
if not kernel:
print "Couldn't determine the kernel for this BSP, exiting."
sys.exit(1)
kernel_bbfile = os.path.join(layer, "recipes-kernel/linux/" + kernel + ".bbappend")
if not os.path.isfile(kernel_bbfile):
kernel_bbfile = os.path.join(layer, "recipes-kernel/linux/" + kernel + ".bb")
if not os.path.isfile(kernel_bbfile):
return
kernel_bbfile_prev = kernel_bbfile + ".prev"
shutil.copyfile(kernel_bbfile, kernel_bbfile_prev)
ifile = open(kernel_bbfile_prev, "r")
ofile = open(kernel_bbfile, "w")
ifile_lines = ifile.readlines()
for ifile_line in ifile_lines:
if ifile_line.strip().startswith("PR"):
ifile_line = inc_pr(ifile_line)
ofile.write(ifile_line)
ofile.close()
ifile.close()
def kernels(context):
    """
    Return the list of available kernels in the BSP i.e. corresponding
    to the kernel .bbappends found in the layer, plus the 'custom'
    pseudo-kernel.
    """
    arch_dir = os.path.join(context["scripts_path"],
                            "lib/bsp/substrate/target/arch/" + context["arch"])
    kernel_dir = os.path.join(arch_dir, "recipes-kernel/linux")
    bbappend_paths = glob.glob(os.path.join(kernel_dir, "*.bbappend"))

    available = []
    for bbappend in bbappend_paths:
        name = os.path.splitext(os.path.basename(bbappend))[0]
        # Drop any templating prefix up to and including CLOSE_TAG.
        tag_idx = name.find(CLOSE_TAG)
        if tag_idx != -1:
            name = name[tag_idx + len(CLOSE_TAG):].strip()
        available.append(name)

    # "custom" is always offered in addition to the discovered kernels.
    available.append("custom")

    return available
def extract_giturl(file):
    """
    Extract the git url of the kernel repo from the kernel recipe's
    SRC_URI.

    The returned url's scheme is taken from the 'protocol=' SRC_URI
    parameter (defaulting to git). Returns None if no SRC_URI line
    with a git:// url was found.
    """
    url = None
    # Close the recipe file in all cases (the original leaked the handle).
    f = open(file, "r")
    try:
        lines = f.readlines()
    finally:
        f.close()
    for line in lines:
        line = line.strip()
        if line.startswith("SRC_URI"):
            line = line[len("SRC_URI"):].strip()
            if line.startswith("="):
                line = line[1:].strip()
                if line.startswith("\""):
                    line = line[1:].strip()
                    prot = "git"
                    for s in line.split(";"):
                        if s.startswith("git://"):
                            url = s
                        if s.startswith("protocol="):
                            prot = s.split("=")[1]
                    if url:
                        # Swap the 'git' scheme for the requested protocol,
                        # e.g. git://host/repo -> https://host/repo.
                        url = prot + url[3:]
    return url
def find_giturl(context):
    """
    Find the git url of the kernel repo from the kernel recipe's
    SRC_URI.

    Scans the meta layer's kernel recipes for one whose name is a
    substring of context["filename"] and returns its SRC_URI git url,
    or None if no recipe matches.
    """
    # The original also read context["name"] and context["scripts_path"]
    # into unused locals; those dead reads have been removed.
    filebase = context["filename"]

    meta_layer = find_meta_layer()

    kerndir = os.path.join(meta_layer, "recipes-kernel/linux")
    bbglob = os.path.join(kerndir, "*.bb")
    bbs = glob.glob(bbglob)
    for kernel in bbs:
        filename = os.path.splitext(os.path.basename(kernel))[0]
        if filename in filebase:
            giturl = extract_giturl(kernel)
            return giturl

    return None
def read_features(scripts_path, machine):
    """
    Find and return a list of features in a machine's user-defined
    features fragment [${machine}-user-features.scc].
    """
    features_file = open_user_file(scripts_path, machine,
                                   machine + "-user-features.scc", "r")
    content = features_file.readlines()
    features_file.close()

    features = []
    for raw_line in content:
        stripped = raw_line.strip()
        # Skip blanks and comments; each remaining line is
        # "include <feature>", and we keep the <feature> part.
        if not stripped or stripped.startswith("#"):
            continue
        features.append(stripped.split()[1].strip())

    return features
def write_features(scripts_path, machine, features):
    """
    Write (replace) the list of feature items in a machine's
    user-defined features fragment [${machine}-user-features.scc],
    one 'include <item>' line per feature.
    """
    features_file = open_user_file(scripts_path, machine,
                                   machine + "-user-features.scc", "w")
    features_file.writelines("include " + feature + "\n"
                             for feature in features)
    features_file.close()
    # The kernel recipe's inputs changed, so notify the build system.
    kernel_contents_changed(scripts_path, machine)
def yocto_kernel_feature_list(scripts_path, machine):
    """
    Display the list of features used in a machine's user-defined
    features fragment [${machine}-user-features.scc].
    """
    features = read_features(scripts_path, machine)

    print "The current set of machine-specific features for %s is:" % machine
    print gen_choices_str(features)
def yocto_kernel_feature_rm(scripts_path, machine):
    """
    Display the list of features used in a machine's user-defined
    features fragment [${machine}-user-features.scc], prompt the user
    for one or more to remove, and remove them.
    """
    features = read_features(scripts_path, machine)

    print "Specify the features to remove:"
    # NOTE(review): 'input' shadows the builtin; harmless here but worth
    # renaming.
    input = raw_input(gen_choices_str(features))
    rm_choices = input.split()
    rm_choices.sort()

    removed = []

    # Process the highest indices first so earlier pops don't shift the
    # positions of entries still pending removal.
    for choice in reversed(rm_choices):
        try:
            idx = int(choice) - 1
        except ValueError:
            print "Invalid choice (%s), exiting" % choice
            sys.exit(1)
        if idx < 0 or idx >= len(features):
            print "Invalid choice (%d), exiting" % (idx + 1)
            sys.exit(1)
        removed.append(features.pop(idx))

    write_features(scripts_path, machine, features)

    print "Removed features:"
    for r in removed:
        print "\t%s" % r
def yocto_kernel_feature_add(scripts_path, machine, features):
"""
Add one or more features a machine's user-defined features
fragment [${machine}-user-features.scc].
"""
new_items = []
for item in features:
if not item.endswith(".scc"):
print "Invalid feature (%s), exiting" % item
sys.exit(1)
new_items.append(item)
cur_items = read_features(scripts_path, machine)
cur_items.extend(new_items)
write_features(scripts_path, machine, cur_items)
print "Added features:"
for n in new_items:
print "\t%s" % n
def find_feature_url(git_url):
    """
    Find the url of the kern-features.rc file for the kernel repo
    specified from the BSP's kernel recipe SRC_URI.

    Returns the empty string for non-git:// urls.
    """
    if not git_url.startswith("git://"):
        return ""

    host_and_path = git_url[len("git://"):].strip().split("/")
    repo = host_and_path[1]
    # Drop a trailing .git from the repo name, if present.
    if repo.endswith(".git"):
        repo = repo[:-len(".git")]

    return ("http://" + host_and_path[0] + "/cgit/cgit.cgi/" + repo +
            "/plain/meta/cfg/kern-features.rc?h=meta")
def find_feature_desc(lines):
    """
    Find the feature description and compatibility in the passed-in
    set of lines. Returns a string of the form 'desc [compat]',
    with defaults of 'no description available' and 'unknown'.
    """
    desc = "no description available"
    compat = "unknown"

    for line in lines:
        desc_idx = line.find("KFEATURE_DESCRIPTION")
        if desc_idx != -1:
            value = line[desc_idx + len("KFEATURE_DESCRIPTION"):].strip()
            # Strip a surrounding pair of double quotes, if present.
            if value.startswith("\""):
                value = value[1:]
                if value.endswith("\""):
                    value = value[:-1]
            desc = value
        else:
            compat_idx = line.find("KFEATURE_COMPATIBILITY")
            if compat_idx != -1:
                compat = line[compat_idx + len("KFEATURE_COMPATIBILITY"):].strip()

    return desc + " [" + compat + "]"
def print_feature_descs(layer, feature_dir):
"""
Print the feature descriptions for the features in feature_dir.
"""
kernel_files_features = os.path.join(layer, "recipes-kernel/linux/files/" +
feature_dir)
for root, dirs, files in os.walk(kernel_files_features):
for file in files:
if file.endswith("~") or file.endswith("#"):
continue
if file.endswith(".scc"):
fullpath = os.path.join(layer, "recipes-kernel/linux/files/" +
feature_dir + "/" + file)
f = open(fullpath)
feature_desc = find_feature_desc(f.readlines())
print feature_dir + "/" + file + ": " + feature_desc
def yocto_kernel_available_features_list(scripts_path, machine):
    """
    Display the list of all the kernel features available for use in
    BSPs, as gathered from the set of feature sources: the remote
    kernel repo's kern-features.rc plus the BSP's local 'cfg' and
    'features' dirs.
    """
    layer = find_bsp_layer(machine)
    kernel = find_current_kernel(layer, machine)
    if not kernel:
        print "Couldn't determine the kernel for this BSP, exiting."
        sys.exit(1)

    # Minimal context for find_giturl(); only 'filename' is used for
    # the recipe match.
    context = create_context(machine, "arch", scripts_path)
    context["name"] = "name"
    context["filename"] = kernel
    giturl = find_giturl(context)
    feature_url = find_feature_url(giturl)

    # Fetch the remote feature list via wget; empty output means the
    # fetch failed and only local features are listed.
    feature_cmd = "wget -q -O - " + feature_url
    tmp = subprocess.Popen(feature_cmd, shell=True, stdout=subprocess.PIPE).stdout.read()

    print "The current set of kernel features available to %s is:\n" % machine

    if tmp:
        tmpline = tmp.split("\n")
        # Only entries in the [kernel-options] section are listed;
        # parsing stops at the next section header.
        in_kernel_options = False
        for line in tmpline:
            if not "=" in line:
                if in_kernel_options:
                    break
                if "kernel-options" in line:
                    in_kernel_options = True
                continue
            if in_kernel_options:
                feature_def = line.split("=")
                # NOTE(review): feature_type is parsed but never used.
                feature_type = feature_def[0].strip()
                feature = feature_def[1].strip()
                # One extra wget per feature — slow for long lists.
                desc = get_feature_desc(giturl, feature)
                print "%s: %s" % (feature, desc)

    print "[local]"

    print_feature_descs(layer, "cfg")
    print_feature_descs(layer, "features")
def find_feature_desc_url(git_url, feature):
    """
    Find the url of the kernel feature in the kernel repo specified
    from the BSP's kernel recipe SRC_URI.

    Returns the empty string for non-git:// urls.
    """
    if not git_url.startswith("git://"):
        return ""

    parts = git_url[len("git://"):].strip().split("/")
    repo = parts[1]
    # Drop a trailing .git from the repo name, if present.
    if repo.endswith(".git"):
        repo = repo[:-len(".git")]

    return ("http://" + parts[0] + "/cgit/cgit.cgi/" + repo +
            "/plain/meta/cfg/kernel-cache/" + feature + "?h=meta")
def get_feature_desc(git_url, feature):
    """
    Return a feature description of the form 'description [compatibility]',
    fetched from the feature's file in the kernel repo's cgit mirror.
    """
    desc_url = find_feature_desc_url(git_url, feature)
    fetch_cmd = "wget -q -O - " + desc_url
    raw = subprocess.Popen(fetch_cmd, shell=True,
                           stdout=subprocess.PIPE).stdout.read()
    return find_feature_desc(raw.split("\n"))
def yocto_kernel_feature_describe(scripts_path, machine, feature):
    """
    Display the description of a specific kernel feature available for
    use in a BSP.
    """
    layer = find_bsp_layer(machine)

    kernel = find_current_kernel(layer, machine)
    if not kernel:
        print "Couldn't determine the kernel for this BSP, exiting."
        sys.exit(1)

    # Minimal context for find_giturl(); only 'filename' is used for
    # the recipe match.
    context = create_context(machine, "arch", scripts_path)
    context["name"] = "name"
    context["filename"] = kernel

    giturl = find_giturl(context)

    desc = get_feature_desc(giturl, feature)

    print desc
def check_feature_name(feature_name):
    """
    Sanity-check the feature name for create/destroy. Return False if not OK.

    Feature names must be bare .scc filenames with no path components.
    """
    if not feature_name.endswith(".scc"):
        print "Invalid feature name (must end with .scc) [%s], exiting" % feature_name
        return False

    if "/" in feature_name:
        print "Invalid feature name (don't specify directory) [%s], exiting" % feature_name
        return False

    return True
def check_create_input(feature_items):
"""
Sanity-check the create input. Return False if not OK.
"""
if not check_feature_name(feature_items[0]):
return False
if feature_items[1].endswith(".patch") or feature_items[1].startswith("CONFIG_"):
print "Missing description and/or compatibilty [%s], exiting" % feature_items[1]
return False
if feature_items[2].endswith(".patch") or feature_items[2].startswith("CONFIG_"):
print "Missing description and/or compatibility [%s], exiting" % feature_items[1]
return False
return True
def yocto_kernel_feature_create(scripts_path, machine, feature_items):
    """
    Create a recipe-space kernel feature in a BSP.

    feature_items: [name.scc, description, compatibility, item...]
    where each extra item is either a .patch file or a CONFIG_* option.
    Writes a .cfg fragment and a .scc description, and copies any
    patches, into the BSP's 'files/cfg' or 'files/features' dir.
    """
    if not check_create_input(feature_items):
        sys.exit(1)

    feature = feature_items[0]
    feature_basename = feature.split(".")[0]
    feature_description = feature_items[1]
    feature_compat = feature_items[2]

    # Partition the remaining items into patches and config options.
    patches = []
    cfg_items = []
    for item in feature_items[3:]:
        if item.endswith(".patch"):
            patches.append(item)
        elif item.startswith("CONFIG"):
            # NOTE(review): a CONFIG_* item without '=y' or '=m' is
            # silently dropped rather than rejected — confirm intent.
            if ("=y" in item or "=m" in item):
                cfg_items.append(item)
        else:
            print "Invalid feature item (must be .patch or CONFIG_*) [%s], exiting" % item
            sys.exit(1)

    # Features with patches live under 'features', config-only ones
    # under 'cfg'.
    feature_dirname = "cfg"
    if patches:
        feature_dirname = "features"

    filesdir = find_filesdir(scripts_path, machine)
    if not filesdir:
        print "Couldn't add feature (%s), no 'files' dir found" % feature
        sys.exit(1)

    featdir = os.path.join(filesdir, feature_dirname)
    if not os.path.exists(featdir):
        os.mkdir(featdir)

    # Copy the patch files into the feature dir.
    for patch in patches:
        if not os.path.isfile(patch):
            print "Couldn't find patch (%s), exiting" % patch
            sys.exit(1)
        basename = os.path.basename(patch)
        featdir_patch = os.path.join(featdir, basename)
        shutil.copyfile(patch, featdir_patch)

    # Write the .cfg fragment, one config option per line.
    new_cfg_filename = os.path.join(featdir, feature_basename + ".cfg")
    new_cfg_file = open(new_cfg_filename, "w")
    for cfg_item in cfg_items:
        new_cfg_file.write(cfg_item + "\n")
    new_cfg_file.close()

    # Write the .scc description referencing the patches and the .cfg.
    new_feature_filename = os.path.join(featdir, feature_basename + ".scc")
    new_feature_file = open(new_feature_filename, "w")
    new_feature_file.write("define KFEATURE_DESCRIPTION \"" + feature_description + "\"\n")
    new_feature_file.write("define KFEATURE_COMPATIBILITY " + feature_compat + "\n\n")
    for patch in patches:
        patch_dir, patch_file = os.path.split(patch)
        new_feature_file.write("patch " + patch_file + "\n")
    new_feature_file.write("kconf non-hardware " + feature_basename + ".cfg\n")
    new_feature_file.close()

    print "Added feature:"
    print "\t%s" % feature_dirname + "/" + feature
def feature_in_use(scripts_path, machine, feature):
    """
    Determine whether the specified feature is in use by the BSP.
    Return True if so, False otherwise.
    """
    # Membership test uses ==, matching the original explicit loop.
    return feature in read_features(scripts_path, machine)
def feature_remove(scripts_path, machine, feature):
    """
    Remove the specified feature from the available recipe-space
    features defined for the BSP.
    """
    remaining = [f for f in read_features(scripts_path, machine)
                 if f != feature]
    write_features(scripts_path, machine, remaining)
def yocto_kernel_feature_destroy(scripts_path, machine, feature):
"""
Remove a recipe-space kernel feature from a BSP.
"""
if not check_feature_name(feature):
sys.exit(1)
if feature_in_use(scripts_path, machine, "features/" + feature) or \
feature_in_use(scripts_path, machine, "cfg/" + feature):
print "Feature %s is in use (use 'feature rm' to un-use it first), exiting" % feature
sys.exit(1)
filesdir = find_filesdir(scripts_path, machine)
if not filesdir:
print "Couldn't destroy feature (%s), no 'files' dir found" % feature
sys.exit(1)
feature_dirname = "features"
featdir = os.path.join(filesdir, feature_dirname)
if not os.path.exists(featdir):
print "Couldn't find feature directory (%s)" % feature_dirname
sys.exit(1)
feature_fqn = os.path.join(featdir, feature)
if not os.path.exists(feature_fqn):
feature_dirname = "cfg"
featdir = os.path.join(filesdir, feature_dirname)
if not os.path.exists(featdir):
print "Couldn't find feature directory (%s)" % feature_dirname
sys.exit(1)
feature_fqn = os.path.join(featdir, feature_filename)
if not os.path.exists(feature_fqn):
print "Couldn't find feature (%s)" % feature
sys.exit(1)
f = open(feature_fqn, "r")
lines = f.readlines()
for line in lines:
s = line.strip()
if s.startswith("patch ") or s.startswith("kconf "):
split_line = s.split()
filename = os.path.join(featdir, split_line[-1])
if os.path.exists(filename):
os.remove(filename)
f.close()
os.remove(feature_fqn)
feature_remove(scripts_path, machine, feature)
print "Removed feature:"
print "\t%s" % feature_dirname + "/" + feature
def base_branches(context):
    """
    Return a list of the base branches found in the kernel git repo,
    i.e. branches whose refs end in 'base', with the trailing '/base'
    stripped.
    """
    giturl = find_giturl(context)

    print "Getting branches from remote repo %s..." % giturl

    # *heads* limits the listing to branch refs; 2>&1 folds stderr into
    # the captured output.
    gitcmd = "git ls-remote %s *heads* 2>&1" % (giturl)
    tmp = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read()

    branches = []

    if tmp:
        tmpline = tmp.split("\n")
        for line in tmpline:
            # An empty line marks the end of the listing.
            if len(line)==0:
                break;
            if not line.endswith("base"):
                continue;
            idx = line.find("refs/heads/")
            kbranch = line[idx + len("refs/heads/"):]
            if kbranch.find("/") == -1 and kbranch.find("base") == -1:
                continue
            # Strip the trailing '/base' to get the base branch name.
            idx = kbranch.find("base")
            branches.append(kbranch[:idx - 1])

    return branches
def all_branches(context):
    """
    Return a list of all the branches found in the kernel git repo.

    If the context supplies 'branches_base' (a colon-separated list of
    branch prefixes), only branches under those prefixes are returned;
    otherwise 'standard'/'base' branches plus 'base' itself are kept.
    """
    giturl = find_giturl(context)

    print "Getting branches from remote repo %s..." % giturl

    # *heads* limits the listing to branch refs; 2>&1 folds stderr into
    # the captured output.
    gitcmd = "git ls-remote %s *heads* 2>&1" % (giturl)
    tmp = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read()

    branches = []

    # Optional restriction to a set of branch prefixes.
    base_prefixes = None

    try:
        branches_base = context["branches_base"]
        if branches_base:
            base_prefixes = branches_base.split(":")
    except KeyError:
        pass

    # NOTE(review): 'arch' is read but never used in this function.
    arch = context["arch"]

    if tmp:
        tmpline = tmp.split("\n")
        for line in tmpline:
            # An empty line marks the end of the listing.
            if len(line)==0:
                break;
            idx = line.find("refs/heads/")
            kbranch = line[idx + len("refs/heads/"):]
            kbranch_prefix = kbranch.rsplit("/", 1)[0]

            if base_prefixes:
                # Prefix mode: keep only branches whose prefix matches,
                # then move on to the next line.
                for base_prefix in base_prefixes:
                    if kbranch_prefix == base_prefix:
                        branches.append(kbranch)
                continue

            # Default mode: keep 'standard'/'base' branches plus 'base'.
            if (kbranch.find("/") != -1 and
                (kbranch.find("standard") != -1 or kbranch.find("base") != -1) or
                kbranch == "base"):
                branches.append(kbranch)
                continue

    return branches
| {
"content_hash": "20526f611a7774594bc454b33d944aba",
"timestamp": "",
"source": "github",
"line_count": 1043,
"max_line_length": 115,
"avg_line_length": 31.17353787152445,
"alnum_prop": 0.5958356400319862,
"repo_name": "marcosbontempo/inatelos",
"id": "ba68b60fcb1622ee3381c1c36317a4db113955a5",
"size": "33524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poky-daisy/scripts/lib/bsp/kernel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "158"
},
{
"name": "BitBake",
"bytes": "1910696"
},
{
"name": "BlitzBasic",
"bytes": "4400"
},
{
"name": "C",
"bytes": "1751572"
},
{
"name": "C++",
"bytes": "354295"
},
{
"name": "CMake",
"bytes": "6537"
},
{
"name": "CSS",
"bytes": "27029"
},
{
"name": "Groff",
"bytes": "502444"
},
{
"name": "HTML",
"bytes": "141762"
},
{
"name": "JavaScript",
"bytes": "22555"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "32254"
},
{
"name": "Nginx",
"bytes": "2744"
},
{
"name": "Perl",
"bytes": "66300"
},
{
"name": "Perl6",
"bytes": "73"
},
{
"name": "Python",
"bytes": "3529760"
},
{
"name": "Shell",
"bytes": "598521"
},
{
"name": "Tcl",
"bytes": "60106"
},
{
"name": "VimL",
"bytes": "8506"
},
{
"name": "XSLT",
"bytes": "8814"
}
],
"symlink_target": ""
} |
from twisted.internet import task
from twisted.internet import reactor
from twisted.web import resource, static
from coherence import __version__
from coherence.extern.et import ET, indent
from coherence.upnp.services.servers.switch_power_server import SwitchPowerServer
from coherence.upnp.devices.basics import RootDeviceXML, DeviceHttpRoot, BasicDeviceMixin
import coherence.extern.louie as louie
from coherence import log
class HttpRoot(DeviceHttpRoot):
    # HTTP resource root for the BinaryLight device; only overrides the
    # category used by coherence's logging system.
    logCategory = 'binarylight'
class BinaryLight(log.Loggable,BasicDeviceMixin):
    """UPnP BinaryLight root device: wires a backend to a SwitchPower
    service and publishes the device description over HTTP."""

    logCategory = 'binarylight'
    # UPnP device type / version advertised in the device description.
    device_type = 'BinaryLight'
    version = 1

    def fire(self,backend,**kwargs):
        # Instantiate the backend, either in a worker thread (default)
        # or synchronously when 'no_thread_needed' is passed.
        if kwargs.get('no_thread_needed',False) == False:
            """ this could take some time, put it in a thread to be sure it doesn't block
                as we can't tell for sure that every backend is implemented properly """

            from twisted.internet import threads
            d = threads.deferToThread(backend, self, **kwargs)

            def backend_ready(backend):
                self.backend = backend

            def backend_failure(x):
                self.warning('backend not installed, %s activation aborted' % self.device_type)
                self.debug(x)

            d.addCallback(backend_ready)
            d.addErrback(backend_failure)

            # FIXME: we need a timeout here so if the signal we wait for not arrives we'll
            #        can close down this device
        else:
            self.backend = backend(self, **kwargs)

    def init_complete(self, backend):
        # Called once the backend signals readiness; ignore signals from
        # a backend we didn't create.
        if self.backend != backend:
            return
        self._services = []
        self._devices = []

        try:
            self.switch_power_server = SwitchPowerServer(self)
            self._services.append(self.switch_power_server)
        except LookupError,msg:
            self.warning( 'SwitchPowerServer', msg)
            raise LookupError(msg)

        # Give the backend a chance to do UPnP-specific setup, if it
        # implements the hook.
        upnp_init = getattr(self.backend, "upnp_init", None)
        if upnp_init:
            upnp_init()

        self.web_resource = HttpRoot(self)
        # The uuid is prefixed (the first 5 chars are stripped for the
        # web path) — presumably a 'uuid:' prefix; confirm upstream.
        self.coherence.add_web_resource( str(self.uuid)[5:], self.web_resource)

        # Publish one description document per supported version,
        # description-<v>.xml, counting down to 1.
        version = self.version
        while version > 0:
            self.web_resource.putChild( 'description-%d.xml' % version,
                    RootDeviceXML( self.coherence.hostname,
                    str(self.uuid),
                    self.coherence.urlbase,
                    device_type=self.device_type, version=version,
                    friendly_name=self.backend.name,
                    model_description='Coherence UPnP %s' % self.device_type,
                    model_name='Coherence UPnP %s' % self.device_type,
                    services=self._services,
                    devices=self._devices,
                    icons=self.icons))
            version -= 1

        self.web_resource.putChild('SwitchPower', self.switch_power_server)

        # Serve any file:// icons from local disk.
        # NOTE(review): 'os' is not imported in this module's visible
        # import list — confirm it is imported elsewhere in the file.
        for icon in self.icons:
            if icon.has_key('url'):
                if icon['url'].startswith('file://'):
                    self.web_resource.putChild(os.path.basename(icon['url']),
                                               static.File(icon['url'][7:]))

        self.register()
        self.warning("%s %s (%s) activated with %s" % (self.backend.name, self.device_type, self.backend, str(self.uuid)[5:]))
| {
"content_hash": "6f6238b1a1fc1f24834b6b4cd09afbff",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 126,
"avg_line_length": 36.96875,
"alnum_prop": 0.5680473372781065,
"repo_name": "sreichholf/python-coherence",
"id": "5e993752e7d373d9716d87aafe32c0785362df3d",
"size": "3713",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "coherence/upnp/devices/binary_light.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "712"
},
{
"name": "Python",
"bytes": "1246744"
},
{
"name": "Shell",
"bytes": "1569"
}
],
"symlink_target": ""
} |
from conan.packager import ConanMultiPackager
if __name__ == "__main__":
    # Build the default matrix of package configurations, then keep only
    # the Visual Studio 12 / 14 builds.
    builder = ConanMultiPackager(username="memsharded", channel="testing")
    builder.add_common_builds(shared_option_name="Protobuf:shared")
    builder.builds = [[settings, options]
                      for settings, options in builder.builds
                      if settings["compiler.version"] in (12, 14)]
    builder.run()
| {
"content_hash": "20fc7067ef0ad926ff4daa2ec316ffe3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 37.75,
"alnum_prop": 0.6843267108167771,
"repo_name": "memsharded/conan-protobuf",
"id": "db31155c6bb3a39f9023f7458526916a2df1a94d",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/testing",
"path": "build_ci.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "672"
},
{
"name": "CMake",
"bytes": "22681"
},
{
"name": "Protocol Buffer",
"bytes": "388"
},
{
"name": "Python",
"bytes": "7374"
},
{
"name": "Shell",
"bytes": "976"
}
],
"symlink_target": ""
} |
import re
import sys
import os
import subprocess
import ssl
import shlex
# Require exactly one argument: the server whose certificate to trust.
if len(sys.argv) == 1:
    print("Usage: add_certificate.py <SERVER>")
    sys.exit(1)

host = sys.argv[1]
cert_file = "cert.pem"

# Fetch the server's TLS certificate (port 443) and save it as PEM.
cert = ssl.get_server_certificate((host, 443))
f = open(cert_file, 'w')
f.write(cert)
f.close()

# BouncyCastle provider jar and the app's BKS trust store to update.
bcjar = "tools/bcprov-jdk15on-146.jar"
truststore = "opacclient/libopac/src/main/resources/ssl_trust_store.bks"
storepass = "ro5eivoijeeGohsh0daequoo5Zeepaen"

# Alias = host + the certificate's OpenSSL subject hash, so the same
# host can appear with different certificates.
alias = host + "-" + subprocess.check_output("openssl x509 -inform PEM -subject_hash -noout -in " + cert_file, shell=True).decode('utf-8')
print("Adding certificate to " + truststore + "...")
# Import the cert into the BKS store via keytool + BouncyCastle.
# NOTE(review): shell=True with interpolated argv — acceptable for a
# developer tool run with trusted arguments, but worth confirming.
subprocess.call("keytool -import -v -trustcacerts -alias " + alias +
                " -file " + cert_file +
                " -keystore " + truststore + " -storetype BKS" +
                " -providerclass org.bouncycastle.jce.provider.BouncyCastleProvider" +
                " -providerpath " + bcjar +
                " -storepass " + storepass, shell=True)
# Remove the temporary PEM file.
os.remove(cert_file)
| {
"content_hash": "fffe94d7523764784f7bec0bed55faca",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 138,
"avg_line_length": 29.457142857142856,
"alnum_prop": 0.6537342386032978,
"repo_name": "raphaelm/opacclient",
"id": "99292e35a4d2fbab3a3891d137914f337b6f0655",
"size": "1031",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/add_certificate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "996567"
},
{
"name": "Java",
"bytes": "1593779"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Server
class ServerAdmin(admin.ModelAdmin):
    # Auto-populate the slug field from the name in the admin form.
    prepopulated_fields = {'slug': ('name', )}

admin.site.register(Server, ServerAdmin)
| {
"content_hash": "eb5113595c2373bc752b7bbf9e974eaa",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 19,
"alnum_prop": 0.7421052631578947,
"repo_name": "Jonpro03/Minecrunch_Web",
"id": "f6ea9698c9f7d0bbb56ef87a3aaaeea0d361c166",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/servers/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14516"
},
{
"name": "HTML",
"bytes": "15289"
},
{
"name": "JavaScript",
"bytes": "672"
},
{
"name": "Python",
"bytes": "29985"
}
],
"symlink_target": ""
} |
"""Displays all countries matching a search criteria.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfa
def main(client):
  """Fetches and prints all countries matching the search criteria.

  Args:
    client: an initialized dfa.DfaClient used to create the service stub.
  """
  # Initialize appropriate service.
  spotlight_service = client.GetService(
      'spotlight', 'v1.20', 'https://advertisersapitest.doubleclick.net')

  # Set search criteria: only countries without secure-server support.
  country_search_criteria = {
      'secure': 'false'
  }

  # Get countries.
  results = spotlight_service.getCountriesByCriteria(
      country_search_criteria)

  # Display country names, codes and secure server support information.
  if results:
    for country in results:
      print ('Country with name \'%s\', country code \'%s\', and supports a'
             ' secure server? \'%s\'.'
             % (country['name'], country['countryCode'], country['secure']))
  else:
    print 'No countries found for your criteria.'
if __name__ == '__main__':
  # Initialize client object from the cached googleads.yaml credentials.
  dfa_client = dfa.DfaClient.LoadFromStorage()
  main(dfa_client)
| {
"content_hash": "057ed41a8804fd7a5123a95e3151971f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 29.452380952380953,
"alnum_prop": 0.698464025869038,
"repo_name": "haveal/googleads-python-lib",
"id": "4f79e613d37b28cee407fb920c0161aa891e089b",
"size": "1855",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfa/v1_20/get_countries.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
} |
import asyncio
from random import randint
import common
from discord.ext import commands
class DrinkBank(commands.Cog):
    """Handles the DrankBank and registering drinks."""

    def __init__(self, bot):
        # The discord.py bot this cog is attached to.
        self.bot = bot

    @commands.command(name='drink', aliases=['bottomsup', 'drank'])
    @commands.cooldown(1, 60, type=commands.BucketType.user)
    async def drink(self, ctx):
        """Log a drink taken"""
        author = str(ctx.message.author.display_name)
        if author in common.users:
            output = "Bottoms up, {}. ".format(author)
            result = consume_drink(author)
            if result < -5:
                # Cap the banked-drink balance at 5 and forgive the
                # extra drink just taken.
                output += "Whoa there buddy, drinking around here is a " \
                          "friendship activity. For your sake, I'll go ahead " \
                          "and forget about that one."
                common.users[author]["drinks_owed"] = -5
            elif result < 0:
                # Negative balance = drinks banked in the user's favor.
                output += "You're now banking **{}** dranks.".format(-result)
            else:
                output += "You now owe {} drinks.".format(result)
            await ctx.send(output)
        else:
            await ctx.send("I don't know you, man.")

        # update the db
        common.whos_in.update_db()

    @commands.command(name='drankbank', aliases=['dbank', 'drinkbank'])
    async def drankbank(self, ctx):
        """See your *assets and liabilities* with the bank of drank"""
        output = ":moneybag: The **drankbank** is now open for business " \
                 ":moneybag:\n"
        # Report each known user's balance: positive = owed, negative =
        # banked, zero = clear.
        for name, user in common.users.items():
            if "drinks_owed" in user:
                if user["drinks_owed"] > 0:
                    output += "\n**{}** owes **{}** :tumbler_glass: to the " \
                              "**drankbank**.".format(name, user['drinks_owed'])
                elif user['drinks_owed'] < 0:
                    output += "\n**{}** has **{}** dranks in their ledger. " \
                              "*In the black!*".format(name,
                                                       -user['drinks_owed'])
                else:
                    output += "\n**{}** is in clear and good standing with " \
                              "the **drankbank**.".format(name)
        await ctx.send(output)

    @commands.command(name='shot-lottery')
    @commands.cooldown(1, 60 * 5)
    async def shot_lottery(self, ctx, auto_call=False):
        """Runs a shot-lottery"""
        shot_lottery_string = run_shot_lottery(ctx, auto_call)
        # Send the first four lines with dramatic pauses between them.
        # NOTE(review): ctx.trigger_typing() is not awaited — confirm
        # this is intentional.
        for x in range(4):
            await ctx.send(shot_lottery_string.pop(0))
            ctx.trigger_typing()
            await asyncio.sleep(4)
        # Then flush the remaining lines immediately.
        while len(shot_lottery_string) > 0:
            await ctx.send(shot_lottery_string.pop(0))
        common.whos_in.update_db()
def run_shot_lottery(ctx, auto_call=False):
    """
    Run a shot lottery

    :param ctx: Context
    :param auto_call: Was this called from a win?
    :rtype: list
    :return: Array of strings for the shot lottery
    """
    glass = ":tumbler_glass:"
    output = ["Alright everyone (@here), its time for the SHOT LOTTERY!"
              "\n{} won the last lottery!".format(common.whos_in.last_shot),
              "...The tension is rising..."]
    players = []
    if auto_call:
        # Auto-called: draw the players from the most populated voice
        # channel.
        # NOTE(review): if there are no voice channels at all,
        # 'channel_to_use' is never bound and the loop below raises
        # NameError — confirm this can't happen in practice.
        largest_num_in_voice = 0
        for channel in ctx.get_all_channels():
            if str(channel.type) == "voice" and len(channel.voice_members) \
                    >= largest_num_in_voice:
                largest_num_in_voice = len(channel.voice_members)
                channel_to_use = channel
        for m in channel_to_use.voice_members:
            players.append(m.display_name)
    if not auto_call or len(players) < 1:
        # Fall back to all online members (excluding the bot itself).
        for m in ctx.get_all_members():
            if str(m.status) == 'online' and str(m.display_name) \
                    != 'brochat-bot':
                players.append(m.display_name)
    output.append("{} entered in the SHOT LOTTERY good luck!"
                  .format(", ".join(players)))
    # 'SOCIAL!' is an extra entry: if it wins, everyone drinks.
    players.append('SOCIAL!')
    output.append("...Who will it be!?!?")
    output.append("Selecting a random number between 0 and {}"
                  .format(len(players) - 1))
    winner = randint(0, len(players) - 1)
    if players[winner] != 'SOCIAL!':
        # A single winner: log their drink and @mention them.
        common.add_drink(players[winner])
        for m in ctx.get_all_members():
            if str(m.display_name) == players[winner]:
                tag_id = m.mention
                break
        # NOTE(review): if no member matches the winner's display name,
        # 'tag_id' is unbound here — confirm names are always resolvable.
        output.append("The winning number is {}, Congrats {} you WIN!\n"
                      ":beers: Take your shot!".format(winner, tag_id))
        consecutive = common.whos_in.add_shot_win(players[winner])
        if consecutive > 1:
            output.append("That's {} in a row!".format(consecutive))
    else:
        # SOCIAL: every actual player gets a drink logged.
        output.append("The winning number is {}".format(winner))
        output.append("Ah shit! ITS A SOCIAL! SHOTS! SHOTS! SHOTS!")
        output.append("{}{}{}".format(glass, glass, glass))
        players.pop(winner)
        for player in players:
            common.add_drink(player)
    return output
def in_deep_debt(player):
    """
    Check whether a player is in deep debt.

    :param player: Player to check
    :return: True if the player owes 5 or more drinks
    """
    # A missing 'drinks_owed' entry counts as a zero balance.
    return common.users[player].get("drinks_owed", 0) >= 5
def consume_drink(user):
    """
    Consume (log) a drink for the user, decrementing their tally.

    :param user: user's display name
    :return: the user's updated drinks_owed balance
    """
    record = common.users[user]
    # A missing 'drinks_owed' entry counts as a zero balance.
    record["drinks_owed"] = record.get("drinks_owed", 0) - 1
    return record["drinks_owed"]
def setup(bot):
    """Extension entry point used by the bot loader: attach the DrinkBank
    cog to *bot*."""
    bot.add_cog(DrinkBank(bot))
| {
"content_hash": "7ffe38c7f81d49c12b19fe93c0235078",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 80,
"avg_line_length": 35.839506172839506,
"alnum_prop": 0.5470203238029625,
"repo_name": "nluedtke/brochat-bot",
"id": "d36ae3aaf63948ec98bfa28c2c705fa0a7be39af",
"size": "5806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogs/drinkingcog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "438"
},
{
"name": "Python",
"bytes": "170182"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import patterns, include, url
def autodiscover():
    """Assemble the project's urlpatterns from whichever optional apps are
    present in ``INSTALLED_APPS``.

    Mount order is significant (``amanda.flatpages`` claims ``r'^'`` and
    must stay last), so the apps are processed in a fixed sequence.
    """
    installed = settings.INSTALLED_APPS
    urlpatterns = patterns('')
    if 'grappelli' in installed:
        urlpatterns += patterns(
            '', url(r'^grappelli/', include('grappelli.urls')))
    if 'filebrowser' in installed:
        # Imported lazily so projects without filebrowser still load.
        from filebrowser.sites import site as filebrowser_site
        #filebrowser_site.storage = import_by_path(settings.FILEBROWSER_STORAGE)()
        urlpatterns += patterns(
            '', url(r'^admin/filebrowser/', include(filebrowser_site.urls)))
    if 'ckeditor' in installed:
        urlpatterns += patterns(
            '', url(r'^ckeditor/', include('ckeditor.urls')))
    # Optional amanda apps mounted under a URL namespace, in mount order.
    # NOTE(review): 'amanda.products' includes 'amanda.product.urls.product'
    # (singular) — kept as-is; verify against the actual package layout.
    namespaced_apps = (
        ('amanda.articles', r'^blog/', 'amanda.articles.urls', 'articles'),
        ('amanda.faq', r'^faq/', 'amanda.faq.urls', 'faq'),
        ('amanda.products', r'^products/', 'amanda.product.urls.product',
         'products'),
        ('amanda.resources', r'^resources/', 'amanda.resources.urls',
         'resources'),
        ('amanda.search', r'^search/', 'amanda.search.urls', 'search'),
        ('amanda.flatpages', r'^', 'amanda.flatpages.urls', 'flatpages'),
    )
    for app, regex, urlconf, namespace in namespaced_apps:
        if app in installed:
            urlpatterns += patterns(
                '', url(regex, include(urlconf, namespace=namespace)))
    return urlpatterns
| {
"content_hash": "c619637351124f7f05eddb996619cb0f",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 93,
"avg_line_length": 34.10909090909091,
"alnum_prop": 0.6092750533049041,
"repo_name": "AmandaCMS/amanda-cms",
"id": "8cbec014d0daca331af773071e94460793fa0f63",
"size": "1876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amanda/core/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "456"
},
{
"name": "Python",
"bytes": "311130"
}
],
"symlink_target": ""
} |
from builtins import object
class Module(object):
    """Empire module: set a new macOS desktop and/or login background image
    for the targeted user.

    NOTE(review): the 'Name'/'Description' strings below mention the login
    *message*, while the payload actually changes background images. They
    are runtime data shown in Empire's menus, so they are left untouched.
    """

    def __init__(self, mainMenu, params=[]):
        # metadata info about the module, not modified during runtime
        self.info = {
            # name for the module that will appear in module menus
            'Name': 'Change Login Message for the user.',
            # list of one or more authors for the module
            'Author': ['@424f424f'],
            # more verbose multi-line description of the module
            'Description': 'Change the login message for the user.',
            'Software': '',
            # MITRE ATT&CK technique IDs this module maps to
            'Techniques': ['T1491'],
            # True if the module needs to run in the background
            'Background' : False,
            # File extension to save the file as
            'OutputExtension' : "",
            # if the module needs administrative privileges
            'NeedsAdmin' : False,
            # True if the method doesn't touch disk/is reasonably opsec safe
            'OpsecSafe' : False,
            # the module language
            'Language' : 'python',
            # the minimum language version needed
            'MinLanguageVersion' : '2.6',
            # list of any references/other comments
            'Comments': ['']
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                # The 'Agent' option is the only one that MUST be in a module
                'Description' : 'Agent to run on.',
                'Required' : True,
                'Value' : ''
            },
            'Image' : {
                # Path on the target host of the image file to install
                'Description' : 'Location of the image to use.',
                'Required' : True,
                'Value' : ''
            },
            'Desktop' : {
                # When 'True', the payload sets the image as the desktop
                # background via osascript/Finder
                'Description' : 'True/False to change the desktop background.',
                'Required' : False,
                'Value' : 'False'
            },
            'Login' : {
                # When 'True', the payload copies the image over the login
                # screen background under /Library/Caches
                'Description' : 'True/False to change the login background.',
                'Required' : False,
                'Value' : 'False'
            }
        }
        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu
        # During instantiation, any settable option parameters
        # are passed as an object set to the module and the
        # options dictionary is automatically set. This is mostly
        # in case options are passed on the command line
        if params:
            for param in params:
                # parameter format is [Name, Value]
                option, value = param
                if option in self.options:
                    self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build and return the Python payload source to run on the agent.

        The 'Desktop'/'Login' option values (the strings 'True'/'False')
        are interpolated into the payload as bare Python literals, which
        is why the ``desktop == True`` comparisons inside the payload work.
        """
        image = self.options['Image']['Value']
        desktop = self.options['Desktop']['Value']
        login = self.options['Login']['Value']
        # the Python script itself, with the command to invoke
        # for execution appended to the end. Scripts should output
        # everything to the pipeline for proper parsing.
        #
        # the script should be stripped of comments, with a link to any
        # original reference script included in the comments.
        script = """
import subprocess
desktop = %s
login = %s
if desktop == True:
    try:
        cmd = \"""osascript -e 'tell application "Finder" to set desktop picture to "%s" as POSIX file'""\"
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        print("Desktop background changed!")
    except Exception as e:
        print("Changing desktop background failed")
        print(e)
if login == True:
    try:
        cmd = \"""cp %s /Library/Caches/com.apple.desktop.admin.png""\"
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        print("Login background changed!")
    except Exception as e:
        print("Changing login background failed")
        print(e)
""" % (desktop, login, image, image)
        return script
| {
"content_hash": "aafa59505d1d9b60397544c26e0d83bb",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 107,
"avg_line_length": 36.08730158730159,
"alnum_prop": 0.5425555311194193,
"repo_name": "byt3bl33d3r/Empire",
"id": "c68f757d0fe5acfc50c2690d2b37bb12c78852e2",
"size": "4547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/modules/python/trollsploit/osx/change_background.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "16998705"
},
{
"name": "Python",
"bytes": "2789955"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
} |
from office365.runtime.paths.resource_path import ResourcePath
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.permissions.roles.definitions.collection import RoleDefinitionCollection
from office365.sharepoint.principal.principal import Principal
class RoleAssignment(BaseEntity):
    """An association between a principal or a site group and a role
    definition."""

    @property
    def principal_id(self):
        """Identifier of the principal the role definition is bound to."""
        return self.properties.get("PrincipalId", None)

    @property
    def member(self):
        """Specifies the user or group corresponding to the role assignment."""
        member_path = ResourcePath("Member", self.resource_path)
        return self.properties.get(
            "Member", Principal(self.context, member_path))

    @property
    def role_definition_bindings(self):
        """Specifies a collection of role definitions for this role
        assignment."""
        bindings_path = ResourcePath("RoleDefinitionBindings",
                                     self.resource_path)
        return self.properties.get(
            "RoleDefinitionBindings",
            RoleDefinitionCollection(self.context, bindings_path))

    def get_property(self, name, default_value=None):
        # Map the server property name onto the typed default when the
        # caller did not supply one.
        if default_value is None and name == "RoleDefinitionBindings":
            default_value = self.role_definition_bindings
        return super(RoleAssignment, self).get_property(name, default_value)

    def set_property(self, name, value, persist_changes=True):
        super(RoleAssignment, self).set_property(name, value, persist_changes)
        # Once the principal id is known, the entity becomes addressable
        # through its parent collection.
        if self._resource_path is None and name == "PrincipalId":
            resolved = self.parent_collection.get_by_principal_id(value)
            self._resource_path = resolved.resource_path
        return self
| {
"content_hash": "28bf149528bdba1c24ba91d6d0a4456e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 103,
"avg_line_length": 46.142857142857146,
"alnum_prop": 0.631578947368421,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "76af69799afe190029f1c4951610e859746d782f",
"size": "1938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "office365/sharepoint/permissions/roles/assignments/assignment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
"""Preview for Zinnia"""
from __future__ import division
from django.utils import six
from django.utils.text import Truncator
from django.utils.html import strip_tags
from django.utils.functional import cached_property
from django.utils.encoding import python_2_unicode_compatible
from bs4 import BeautifulSoup
from zinnia.settings import PREVIEW_SPLITTERS
from zinnia.settings import PREVIEW_MAX_WORDS
from zinnia.settings import PREVIEW_MORE_STRING
@python_2_unicode_compatible
class HTMLPreview(object):
    """
    Build an HTML preview of an HTML content.

    The preview is either the part of the content before an explicit
    split marker, or a truncation to ``max_words`` words with
    ``more_string`` appended.
    """

    def __init__(self, content,
                 splitters=PREVIEW_SPLITTERS,
                 max_words=PREVIEW_MAX_WORDS,
                 more_string=PREVIEW_MORE_STRING):
        """
        :param content: HTML content to preview.
        :param splitters: markers that explicitly separate preview/rest.
        :param max_words: word budget used when no splitter is present.
        :param more_string: suffix appended to a shortened preview.
        """
        self._preview = None
        self.content = content
        self.splitters = splitters
        self.max_words = max_words
        self.more_string = more_string

    @property
    def preview(self):
        """
        The preview is a cached property.
        """
        if self._preview is None:
            self._preview = self.build_preview()
        return self._preview

    @property
    def has_more(self):
        """
        Boolean telling if the preview has hidden content.
        """
        return self.preview != self.content

    def __str__(self):
        """
        Method used to render the preview in templates.
        """
        return six.text_type(self.preview)

    def build_preview(self):
        """
        Build the preview by:
        - Checking if a split marker is present in the content
          Then split the content with the marker to build the preview.
        - Splitting the content to a fixed number of words.
        """
        for splitter in self.splitters:
            if splitter in self.content:
                return self.split(splitter)
        return self.truncate()

    def truncate(self):
        """
        Truncate the content with the Truncator object.
        """
        return Truncator(self.content).words(
            self.max_words, self.more_string, html=True)

    def split(self, splitter):
        """
        Split the HTML content with a marker
        without breaking closing markups.
        """
        soup = BeautifulSoup(self.content.split(splitter)[0],
                             'html.parser')
        last_string = soup.find_all(text=True)[-1]
        last_string.replace_with(last_string.string + self.more_string)
        return soup

    @cached_property
    def total_words(self):
        """
        Return the total of words contained in the content.
        """
        return len(strip_tags(self.content).split())

    @cached_property
    def displayed_words(self):
        """
        Return the number of words displayed in the preview.
        """
        displayed = len(strip_tags(self.preview).split())
        if not self.has_more:
            # Bugfix: when nothing was hidden, ``more_string`` was never
            # appended to the preview, so its word count must not be
            # subtracted (the old code under-reported by that amount).
            return displayed
        return displayed - len(self.more_string.split())

    @cached_property
    def remaining_words(self):
        """
        Return the number of words remaining after the preview.
        """
        return self.total_words - self.displayed_words

    @cached_property
    def displayed_percent(self):
        """
        Return the percentage of the content displayed in the preview.
        """
        if not self.total_words:
            # Empty content: avoid ZeroDivisionError; everything is shown.
            return 100.0
        return (self.displayed_words / self.total_words) * 100

    @cached_property
    def remaining_percent(self):
        """
        Return the percentage of the content remaining after the preview.
        """
        if not self.total_words:
            # Empty content: nothing can remain hidden.
            return 0.0
        return (self.remaining_words / self.total_words) * 100
| {
"content_hash": "61048a30bb606ab428bf85f2cae2ba85",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 73,
"avg_line_length": 29.458333333333332,
"alnum_prop": 0.6084865629420085,
"repo_name": "mfalcon/edujango",
"id": "fd5f657ae164cfe70923de8b79c4139f5f097483",
"size": "3535",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "zinnia/preview.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1422089"
},
{
"name": "HTML",
"bytes": "1793045"
},
{
"name": "JavaScript",
"bytes": "8694110"
},
{
"name": "PHP",
"bytes": "2156"
},
{
"name": "Python",
"bytes": "885118"
},
{
"name": "Ruby",
"bytes": "249"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
import graphene
from saleor.core.utils import get_country_name_by_code
from saleor.graphql.payment.enums import (
OrderAction, PaymentChargeStatusEnum, PaymentGatewayEnum)
from saleor.payment.models import ChargeStatus, Payment, TransactionKind
from tests.api.utils import get_graphql_content
VOID_QUERY = """
mutation PaymentVoid($paymentId: ID!) {
paymentVoid(paymentId: $paymentId) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_void_success(
        staff_api_client, permission_manage_orders, payment_txn_preauth):
    """Voiding a pre-authorized payment deactivates it and records a
    successful VOID transaction."""
    assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
    payment_id = graphene.Node.to_global_id(
        'Payment', payment_txn_preauth.pk)
    variables = {'paymentId': payment_id}
    response = staff_api_client.post_graphql(
        VOID_QUERY, variables, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    data = content['data']['paymentVoid']
    assert not data['errors']
    payment_txn_preauth.refresh_from_db()
    assert payment_txn_preauth.is_active is False
    assert payment_txn_preauth.transactions.count() == 2
    txn = payment_txn_preauth.transactions.last()
    assert txn.kind == TransactionKind.VOID
def test_payment_void_gateway_error(
        staff_api_client, permission_manage_orders, payment_txn_preauth,
        monkeypatch):
    """A gateway failure during void returns an error, leaves the payment
    active, and records an unsuccessful VOID transaction."""
    assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
    payment_id = graphene.Node.to_global_id(
        'Payment', payment_txn_preauth.pk)
    variables = {'paymentId': payment_id}
    # Force the dummy gateway to report failure.
    monkeypatch.setattr(
        'saleor.payment.gateways.dummy.dummy_success', lambda: False)
    response = staff_api_client.post_graphql(
        VOID_QUERY, variables, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    data = content['data']['paymentVoid']
    assert data['errors']
    assert data['errors'][0]['field'] is None
    assert data['errors'][0]['message'] == 'Unable to void the transaction.'
    payment_txn_preauth.refresh_from_db()
    assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
    assert payment_txn_preauth.is_active is True
    assert payment_txn_preauth.transactions.count() == 2
    txn = payment_txn_preauth.transactions.last()
    assert txn.kind == TransactionKind.VOID
    assert not txn.is_success
CREATE_QUERY = """
mutation CheckoutPaymentCreate($checkoutId: ID!, $input: PaymentInput!) {
checkoutPaymentCreate(checkoutId: $checkoutId, input: $input) {
payment {
transactions {
kind,
token
}
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_checkout_add_payment(
        user_api_client, checkout_with_item, graphql_address_data):
    """Creating a payment on a checkout stores an active, not-yet-charged
    Payment with the checkout's total and the provided token."""
    checkout = checkout_with_item
    checkout_id = graphene.Node.to_global_id('Checkout', checkout.pk)
    variables = {
        'checkoutId': checkout_id,
        'input': {
            'gateway': 'DUMMY',
            'token': 'sample-token',
            'amount': str(checkout.get_total().gross.amount),
            'billingAddress': graphql_address_data}}
    response = user_api_client.post_graphql(CREATE_QUERY, variables)
    content = get_graphql_content(response)
    data = content['data']['checkoutPaymentCreate']
    assert not data['errors']
    transactions = data['payment']['transactions']
    assert not transactions
    payment = Payment.objects.get()
    assert payment.checkout == checkout
    assert payment.is_active
    assert payment.token == 'sample-token'
    total = checkout.get_total().gross
    assert payment.total == total.amount
    assert payment.currency == total.currency
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
def test_use_checkout_billing_address_as_payment_billing(
        user_api_client, checkout_with_item, address):
    """Payment creation without a billing address fails; once the checkout
    gets one, it is copied onto the created payment."""
    checkout = checkout_with_item
    checkout_id = graphene.Node.to_global_id('Checkout', checkout.pk)
    variables = {
        'checkoutId': checkout_id,
        'input': {
            'gateway': 'DUMMY',
            'token': 'sample-token',
            'amount': str(checkout.get_total().gross.amount)}}
    response = user_api_client.post_graphql(CREATE_QUERY, variables)
    content = get_graphql_content(response)
    data = content['data']['checkoutPaymentCreate']
    # check if proper error is returned if address is missing
    assert data['errors'][0]['field'] == 'billingAddress'
    # assign the address and try again
    address.street_address_1 = 'spanish-inqusition'
    address.save()
    checkout.billing_address = address
    checkout.save()
    response = user_api_client.post_graphql(CREATE_QUERY, variables)
    content = get_graphql_content(response)
    data = content['data']['checkoutPaymentCreate']
    checkout.refresh_from_db()
    assert checkout.payments.count() == 1
    payment = checkout.payments.first()
    assert payment.billing_address_1 == address.street_address_1
CAPTURE_QUERY = """
mutation PaymentCapture($paymentId: ID!, $amount: Decimal!) {
paymentCapture(paymentId: $paymentId, amount: $amount) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_capture_success(
        staff_api_client, permission_manage_orders, payment_txn_preauth):
    """Capturing the full pre-authorized amount fully charges the payment
    and records a CAPTURE transaction."""
    payment = payment_txn_preauth
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    payment_id = graphene.Node.to_global_id(
        'Payment', payment_txn_preauth.pk)
    variables = {
        'paymentId': payment_id,
        'amount': str(payment_txn_preauth.total)}
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY, variables, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    data = content['data']['paymentCapture']
    assert not data['errors']
    payment_txn_preauth.refresh_from_db()
    assert payment.charge_status == ChargeStatus.FULLY_CHARGED
    assert payment.transactions.count() == 2
    txn = payment.transactions.last()
    assert txn.kind == TransactionKind.CAPTURE
def test_payment_capture_with_invalid_argument(
        staff_api_client, permission_manage_orders, payment_txn_preauth):
    """A zero capture amount is rejected with a validation error."""
    payment = payment_txn_preauth
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    payment_id = graphene.Node.to_global_id(
        'Payment', payment_txn_preauth.pk)
    variables = {
        'paymentId': payment_id,
        'amount': 0}
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY, variables, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    data = content['data']['paymentCapture']
    assert len(data['errors']) == 1
    assert data['errors'][0]['message'] == \
        'Amount should be a positive number.'
def test_payment_capture_gateway_error(
        staff_api_client, permission_manage_orders, payment_txn_preauth,
        monkeypatch):
    """A gateway failure during capture leaves the charge status unchanged
    and records an unsuccessful CAPTURE transaction."""
    payment = payment_txn_preauth
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    payment_id = graphene.Node.to_global_id(
        'Payment', payment_txn_preauth.pk)
    variables = {
        'paymentId': payment_id,
        'amount': str(payment_txn_preauth.total)}
    # Force the dummy gateway to report failure.
    monkeypatch.setattr(
        'saleor.payment.gateways.dummy.dummy_success', lambda: False)
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY, variables, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    data = content['data']['paymentCapture']
    assert data['errors']
    assert data['errors'][0]['field'] is None
    assert data['errors'][0]['message']
    payment_txn_preauth.refresh_from_db()
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    assert payment.transactions.count() == 2
    txn = payment.transactions.last()
    assert txn.kind == TransactionKind.CAPTURE
    assert not txn.is_success
REFUND_QUERY = """
mutation PaymentRefund($paymentId: ID!, $amount: Decimal!) {
paymentRefund(paymentId: $paymentId, amount: $amount) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_refund_success(
        staff_api_client, permission_manage_orders, payment_txn_captured):
    """Refunding the full captured amount marks the payment fully refunded
    and records a REFUND transaction."""
    payment = payment_txn_captured
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount = payment.total
    payment.save()
    payment_id = graphene.Node.to_global_id(
        'Payment', payment.pk)
    variables = {
        'paymentId': payment_id,
        'amount': str(payment.total)}
    response = staff_api_client.post_graphql(
        REFUND_QUERY, variables, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    data = content['data']['paymentRefund']
    assert not data['errors']
    payment.refresh_from_db()
    assert payment.charge_status == ChargeStatus.FULLY_REFUNDED
    assert payment.transactions.count() == 2
    txn = payment.transactions.last()
    assert txn.kind == TransactionKind.REFUND
def test_payment_refund_with_invalid_argument(
        staff_api_client, permission_manage_orders, payment_txn_captured):
    """A zero refund amount is rejected with a validation error."""
    payment = payment_txn_captured
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount = payment.total
    payment.save()
    payment_id = graphene.Node.to_global_id(
        'Payment', payment.pk)
    variables = {
        'paymentId': payment_id,
        'amount': 0}
    response = staff_api_client.post_graphql(
        REFUND_QUERY, variables, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    data = content['data']['paymentRefund']
    assert len(data['errors']) == 1
    assert data['errors'][0]['message'] == \
        'Amount should be a positive number.'
def test_payment_refund_error(
        staff_api_client, permission_manage_orders, payment_txn_captured,
        monkeypatch):
    """A gateway failure during refund keeps the payment fully charged and
    records an unsuccessful REFUND transaction."""
    payment = payment_txn_captured
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount = payment.total
    payment.save()
    payment_id = graphene.Node.to_global_id(
        'Payment', payment.pk)
    variables = {
        'paymentId': payment_id,
        'amount': str(payment.total)}
    # Force the dummy gateway to report failure.
    monkeypatch.setattr(
        'saleor.payment.gateways.dummy.dummy_success', lambda: False)
    response = staff_api_client.post_graphql(
        REFUND_QUERY, variables, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    data = content['data']['paymentRefund']
    assert data['errors']
    assert data['errors'][0]['field'] is None
    assert data['errors'][0]['message']
    payment.refresh_from_db()
    assert payment.charge_status == ChargeStatus.FULLY_CHARGED
    assert payment.transactions.count() == 2
    txn = payment.transactions.last()
    assert txn.kind == TransactionKind.REFUND
    assert not txn.is_success
def test_payments_query(
        payment_txn_captured, permission_manage_orders, staff_api_client):
    """The payments query exposes amounts, billing address, allowed
    actions, transactions and credit-card data of a captured payment."""
    query = """ {
    payments(first: 20) {
        edges {
            node {
                id
                gateway
                capturedAmount {
                    amount
                    currency
                }
                total {
                    amount
                    currency
                }
                actions
                chargeStatus
                billingAddress {
                    country {
                        code
                        country
                    }
                    firstName
                    lastName
                    cityArea
                    countryArea
                    city
                    companyName
                    streetAddress1
                    streetAddress2
                    postalCode
                }
                transactions {
                    amount {
                        currency
                        amount
                    }
                }
                creditCard {
                    expMonth
                    expYear
                    brand
                    firstDigits
                    lastDigits
                }
            }
        }
    }
    }
    """
    response = staff_api_client.post_graphql(
        query, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    data = content['data']['payments']['edges'][0]['node']
    pay = payment_txn_captured
    assert data['gateway'] == pay.gateway
    assert data['capturedAmount'] == {
        'amount': pay.captured_amount, 'currency': pay.currency}
    assert data['total'] == {'amount': pay.total, 'currency': pay.currency}
    assert data['chargeStatus'] == PaymentChargeStatusEnum.FULLY_CHARGED.name
    assert data['billingAddress'] == {
        'firstName': pay.billing_first_name,
        'lastName': pay.billing_last_name,
        'city': pay.billing_city,
        'cityArea': pay.billing_city_area,
        'countryArea': pay.billing_country_area,
        'companyName': pay.billing_company_name,
        'streetAddress1': pay.billing_address_1,
        'streetAddress2': pay.billing_address_2,
        'postalCode': pay.billing_postal_code,
        'country': {
            'code': pay.billing_country_code,
            'country': get_country_name_by_code(pay.billing_country_code)
        }
    }
    assert data['actions'] == [OrderAction.REFUND.name]
    txn = pay.transactions.get()
    assert data['transactions'] == [{
        'amount': {
            'currency': pay.currency,
            'amount': float(str(txn.amount))}}]
    assert data['creditCard'] == {
        'expMonth': pay.cc_exp_month,
        'expYear': pay.cc_exp_year,
        'brand': pay.cc_brand,
        'firstDigits': pay.cc_first_digits,
        'lastDigits': pay.cc_last_digits}
def test_query_payment(
        payment_dummy, user_api_client, permission_manage_orders):
    """A single payment can be fetched by its global ID."""
    query = """
    query payment($id: ID) {
        payment(id: $id) {
            id
        }
    }
    """
    payment = payment_dummy
    payment_id = graphene.Node.to_global_id('Payment', payment.pk)
    variables = {'id': payment_id}
    response = user_api_client.post_graphql(
        query, variables, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    received_id = content['data']['payment']['id']
    assert received_id == payment_id
def test_query_payments(
        payment_dummy, permission_manage_orders, staff_api_client):
    """Listing payments returns exactly the existing payment's global ID."""
    query = """
    {
        payments(first: 20) {
            edges {
                node {
                    id
                }
            }
        }
    }
    """
    payment = payment_dummy
    payment_id = graphene.Node.to_global_id('Payment', payment.pk)
    response = staff_api_client.post_graphql(
        query, {}, permissions=[permission_manage_orders])
    content = get_graphql_content(response)
    edges = content['data']['payments']['edges']
    payment_ids = [edge['node']['id'] for edge in edges]
    assert payment_ids == [payment_id]
@patch('saleor.graphql.payment.resolvers.gateway_get_client_token')
def test_query_payment_client_token(mock_get_client_token, user_api_client):
    """paymentClientToken returns the token supplied by the gateway."""
    query = """
    query paymentClientToken($gateway: GatewaysEnum) {
        paymentClientToken(gateway: $gateway)
    }
    """
    example_token = 'example-token'
    mock_get_client_token.return_value = example_token
    variables = {'gateway': PaymentGatewayEnum.BRAINTREE.name}
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    # Bugfix: the previous ``assert mock.called_once_with(...)`` was a
    # no-op — ``called_once_with`` is not a Mock assertion method, it just
    # creates a truthy child mock, so the assert always passed.
    mock_get_client_token.assert_called_once()
    token = content['data']['paymentClientToken']
    assert token == example_token
| {
"content_hash": "ee950c73462bc4e6c6733e06f2a356b0",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 77,
"avg_line_length": 34.86652542372882,
"alnum_prop": 0.6083125721577445,
"repo_name": "UITools/saleor",
"id": "3a0eb4514311010286932d2642f80e3e01e4e862",
"size": "16457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/api/test_payment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96006"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "556961"
},
{
"name": "JavaScript",
"bytes": "64679"
},
{
"name": "Python",
"bytes": "2316144"
},
{
"name": "Shell",
"bytes": "1265"
},
{
"name": "TypeScript",
"bytes": "2526265"
}
],
"symlink_target": ""
} |
import numpy as np
from dipy.utils.optpkg import optional_package
import itertools
from dipy.viz.gmem import GlobalHorizon
fury, have_fury, setup_module = optional_package('fury')
if have_fury:
from dipy.viz import actor, ui, colormap
def build_label(text, font_size=18, bold=False):
    """ Create a plain, left-justified grey Arial text block.

    Parameters
    ----------
    text : str
        Message displayed by the label.
    font_size : int
        Font size of the label text.
    bold : bool
        Whether the label text is rendered bold.

    Returns
    -------
    label : TextBlock2D
    """
    label = ui.TextBlock2D()
    label.message = text
    label.font_size = font_size
    label.font_family = 'Arial'
    label.justification = 'left'
    label.bold = bold
    label.italic = False
    label.shadow = False
    # Make the text background fully transparent.
    text_prop = label.actor.GetTextProperty()
    text_prop.SetBackgroundColor(0, 0, 0)
    text_prop.SetBackgroundOpacity(0.0)
    label.color = (0.7, 0.7, 0.7)
    return label
def _color_slider(slider):
slider.default_color = (1, 0.5, 0)
slider.track.color = (0.8, 0.3, 0)
slider.active_color = (0.9, 0.4, 0)
slider.handle.color = (1, 0.5, 0)
def _color_dslider(slider):
slider.default_color = (1, 0.5, 0)
slider.track.color = (0.8, 0.3, 0)
slider.active_color = (0.9, 0.4, 0)
slider.handles[0].color = (1, 0.5, 0)
slider.handles[1].color = (1, 0.5, 0)
def slicer_panel(renderer, iren,
data=None, affine=None,
world_coords=False,
pam=None, mask=None, mem=GlobalHorizon()):
""" Slicer panel with slicer included
Parameters
----------
renderer : Renderer
iren : Interactor
data : 3d ndarray
affine : 4x4 ndarray
world_coords : bool
If True then the affine is applied.
peaks : PeaksAndMetrics
Default None
mem :
Returns
-------
panel : Panel
"""
orig_shape = data.shape
print('Original shape', orig_shape)
ndim = data.ndim
tmp = data
if ndim == 4:
if orig_shape[-1] > 3:
tmp = data[..., 0]
orig_shape = orig_shape[:3]
value_range = np.percentile(data[..., 0], q=[2, 98])
if orig_shape[-1] == 3:
value_range = (0, 1.)
mem.slicer_rgb = True
if ndim == 3:
value_range = np.percentile(tmp, q=[2, 98])
if not world_coords:
affine = np.eye(4)
image_actor_z = actor.slicer(tmp, affine=affine, value_range=value_range,
interpolation='nearest', picking_tol=0.025)
tmp_new = image_actor_z.resliced_array()
if len(data.shape) == 4:
if data.shape[-1] == 3:
print('Resized to RAS shape ', tmp_new.shape)
else:
print('Resized to RAS shape ', tmp_new.shape + (data.shape[-1],))
else:
print('Resized to RAS shape ', tmp_new.shape)
shape = tmp_new.shape
if pam is not None:
peaks_actor_z = actor.peak_slicer(pam.peak_dirs, None,
mask=mask, affine=affine,
colors=None)
slicer_opacity = 1.
image_actor_z.opacity(slicer_opacity)
image_actor_x = image_actor_z.copy()
x_midpoint = int(np.round(shape[0] / 2))
image_actor_x.display_extent(x_midpoint,
x_midpoint, 0,
shape[1] - 1,
0,
shape[2] - 1)
image_actor_y = image_actor_z.copy()
y_midpoint = int(np.round(shape[1] / 2))
image_actor_y.display_extent(0,
shape[0] - 1,
y_midpoint,
y_midpoint,
0,
shape[2] - 1)
renderer.add(image_actor_z)
renderer.add(image_actor_x)
renderer.add(image_actor_y)
if pam is not None:
renderer.add(peaks_actor_z)
line_slider_z = ui.LineSlider2D(min_value=0,
max_value=shape[2] - 1,
initial_value=shape[2] / 2,
text_template="{value:.0f}",
length=140)
_color_slider(line_slider_z)
def change_slice_z(slider):
z = int(np.round(slider.value))
mem.slicer_curr_actor_z.display_extent(0, shape[0] - 1,
0, shape[1] - 1, z, z)
if pam is not None:
mem.slicer_peaks_actor_z.display_extent(0, shape[0] - 1,
0, shape[1] - 1, z, z)
mem.slicer_curr_z = z
renderer.reset_clipping_range()
line_slider_x = ui.LineSlider2D(min_value=0,
max_value=shape[0] - 1,
initial_value=shape[0] / 2,
text_template="{value:.0f}",
length=140)
_color_slider(line_slider_x)
def change_slice_x(slider):
x = int(np.round(slider.value))
mem.slicer_curr_actor_x.display_extent(x, x, 0, shape[1] - 1, 0,
shape[2] - 1)
renderer.reset_clipping_range()
mem.slicer_curr_x = x
mem.window_timer_cnt += 100
line_slider_y = ui.LineSlider2D(min_value=0,
max_value=shape[1] - 1,
initial_value=shape[1] / 2,
text_template="{value:.0f}",
length=140)
_color_slider(line_slider_y)
def change_slice_y(slider):
y = int(np.round(slider.value))
mem.slicer_curr_actor_y.display_extent(0, shape[0] - 1, y, y,
0, shape[2] - 1)
renderer.reset_clipping_range()
mem.slicer_curr_y = y
# TODO there is some small bug when starting the app the handles
# are sitting a bit low
double_slider = ui.LineDoubleSlider2D(length=140,
initial_values=value_range,
min_value=tmp.min(),
max_value=tmp.max(),
shape='square')
_color_dslider(double_slider)
def apply_colormap(r1, r2):
if mem.slicer_rgb:
return
if mem.slicer_colormap == 'disting':
# use distinguishable colors
rgb = colormap.distinguishable_colormap(nb_colors=256)
rgb = np.asarray(rgb)
else:
# use matplotlib colormaps
rgb = colormap.create_colormap(np.linspace(r1, r2, 256),
name=mem.slicer_colormap,
auto=True)
N = rgb.shape[0]
lut = colormap.vtk.vtkLookupTable()
lut.SetNumberOfTableValues(N)
lut.SetRange(r1, r2)
for i in range(N):
r, g, b = rgb[i]
lut.SetTableValue(i, r, g, b)
lut.SetRampToLinear()
lut.Build()
mem.slicer_curr_actor_z.output.SetLookupTable(lut)
mem.slicer_curr_actor_z.output.Update()
def on_change_ds(slider):
values = slider._values
r1, r2 = values
apply_colormap(r1, r2)
# TODO trying to see why there is a small bug in double slider
# double_slider.left_disk_value = 0
# double_slider.right_disk_value = 98
# double_slider.update(0)
# double_slider.update(1)
double_slider.on_change = on_change_ds
opacity_slider = ui.LineSlider2D(min_value=0.0,
max_value=1.0,
initial_value=slicer_opacity,
length=140,
text_template="{ratio:.0%}")
_color_slider(opacity_slider)
def change_opacity(slider):
slicer_opacity = slider.value
mem.slicer_curr_actor_x.opacity(slicer_opacity)
mem.slicer_curr_actor_y.opacity(slicer_opacity)
mem.slicer_curr_actor_z.opacity(slicer_opacity)
volume_slider = ui.LineSlider2D(min_value=0,
max_value=data.shape[-1] - 1,
initial_value=0,
length=140,
text_template="{value:.0f}",
shape='square')
_color_slider(volume_slider)
    def change_volume(istyle, obj, slider):
        """Switch the displayed 3D volume of a 4D dataset.

        Rebuilds the three slice actors for the selected volume,
        re-registers the picking callbacks, re-adds the actors (and the
        peaks actor, if any) and re-applies the current colormap range.
        """
        vol_idx = int(np.round(slider.value))
        mem.slicer_vol_idx = vol_idx
        # Drop the old slice actors before building replacements.
        renderer.rm(mem.slicer_curr_actor_x)
        renderer.rm(mem.slicer_curr_actor_y)
        renderer.rm(mem.slicer_curr_actor_z)
        tmp = data[..., vol_idx]
        image_actor_z = actor.slicer(tmp,
                                     affine=affine,
                                     value_range=value_range,
                                     interpolation='nearest',
                                     picking_tol=0.025)
        tmp_new = image_actor_z.resliced_array()
        mem.slicer_vol = tmp_new
        z = mem.slicer_curr_z
        image_actor_z.display_extent(0, shape[0] - 1,
                                     0, shape[1] - 1,
                                     z,
                                     z)
        mem.slicer_curr_actor_z = image_actor_z
        # X and Y actors are copies of the Z actor restricted to one plane.
        mem.slicer_curr_actor_x = image_actor_z.copy()
        if pam is not None:
            mem.slicer_peaks_actor_z = peaks_actor_z
        x = mem.slicer_curr_x
        mem.slicer_curr_actor_x.display_extent(x,
                                               x, 0,
                                               shape[1] - 1, 0,
                                               shape[2] - 1)
        mem.slicer_curr_actor_y = image_actor_z.copy()
        y = mem.slicer_curr_y
        mem.slicer_curr_actor_y.display_extent(0, shape[0] - 1,
                                               y,
                                               y,
                                               0, shape[2] - 1)
        # Re-attach left-click voxel picking on the fresh actors.
        mem.slicer_curr_actor_z.AddObserver('LeftButtonPressEvent',
                                            left_click_picker_callback,
                                            1.0)
        mem.slicer_curr_actor_x.AddObserver('LeftButtonPressEvent',
                                            left_click_picker_callback,
                                            1.0)
        mem.slicer_curr_actor_y.AddObserver('LeftButtonPressEvent',
                                            left_click_picker_callback,
                                            1.0)
        renderer.add(mem.slicer_curr_actor_z)
        renderer.add(mem.slicer_curr_actor_x)
        renderer.add(mem.slicer_curr_actor_y)
        if pam is not None:
            renderer.add(mem.slicer_peaks_actor_z)
        r1, r2 = double_slider._values
        apply_colormap(r1, r2)
        istyle.force_render()
    def left_click_picker_callback(obj, ev):
        '''Get the value of the clicked voxel and show it in the panel.'''
        event_pos = iren.GetEventPosition()
        obj.picker.Pick(event_pos[0],
                        event_pos[1],
                        0,
                        renderer)
        i, j, k = obj.picker.GetPointIJK()
        res = mem.slicer_vol[i, j, k]
        try:
            # Scalar volume: a single value.
            message = '%.3f' % res
        except TypeError:
            # Vector (e.g. RGB) volume: format the first three components.
            message = '%.3f %.3f %.3f' % (res[0], res[1], res[2])
        picker_label.message = '({}, {}, {})'.format(str(i), str(j), str(k)) \
            + ' ' + message
    # --- Initial state of the shared ``mem`` object ---
    mem.slicer_vol_idx = 0
    mem.slicer_vol = tmp_new
    mem.slicer_curr_actor_x = image_actor_x
    mem.slicer_curr_actor_y = image_actor_y
    mem.slicer_curr_actor_z = image_actor_z
    if pam is not None:
        # change_volume.peaks_actor_z = peaks_actor_z
        mem.slicer_peaks_actor_z = peaks_actor_z
    # Voxel picking on every slice actor (and the peaks actor, if any).
    mem.slicer_curr_actor_x.AddObserver('LeftButtonPressEvent',
                                        left_click_picker_callback,
                                        1.0)
    mem.slicer_curr_actor_y.AddObserver('LeftButtonPressEvent',
                                        left_click_picker_callback,
                                        1.0)
    mem.slicer_curr_actor_z.AddObserver('LeftButtonPressEvent',
                                        left_click_picker_callback,
                                        1.0)
    if pam is not None:
        mem.slicer_peaks_actor_z.AddObserver('LeftButtonPressEvent',
                                             left_click_picker_callback,
                                             1.0)
    # Start with the middle slice along each axis.
    mem.slicer_curr_x = int(np.round(shape[0] / 2))
    mem.slicer_curr_y = int(np.round(shape[1] / 2))
    mem.slicer_curr_z = int(np.round(shape[2] / 2))
    # Wire the sliders to their handlers.
    line_slider_x.on_change = change_slice_x
    line_slider_y.on_change = change_slice_y
    line_slider_z.on_change = change_slice_z
    double_slider.on_change = on_change_ds
    opacity_slider.on_change = change_opacity
    # The volume slider only triggers on mouse release so the actors are not
    # rebuilt continuously while dragging.
    volume_slider.handle_events(volume_slider.handle.actor)
    volume_slider.on_left_mouse_button_released = change_volume
    # X-slice label; clicking it toggles the slider and actor visibility.
    line_slider_label_x = build_label(text="X Slice")
    line_slider_label_x.visibility = True
    x_counter = itertools.count()
    def label_callback_x(obj, event):
        # Toggle both the label state and the slider visibility.
        line_slider_label_x.visibility = not line_slider_label_x.visibility
        line_slider_x.set_visibility(line_slider_label_x.visibility)
        cnt = next(x_counter)
        # The counter skips re-adding the actor on the very first callback -
        # presumably it is already in the renderer at startup; confirm.
        if line_slider_label_x.visibility and cnt > 0:
            renderer.add(mem.slicer_curr_actor_x)
        else:
            renderer.rm(mem.slicer_curr_actor_x)
        iren.Render()
    line_slider_label_x.actor.AddObserver('LeftButtonPressEvent',
                                          label_callback_x,
                                          1.0)
    # Y-slice label; clicking it toggles the slider and actor visibility.
    line_slider_label_y = build_label(text="Y Slice")
    line_slider_label_y.visibility = True
    y_counter = itertools.count()
    def label_callback_y(obj, event):
        # Toggle both the label state and the slider visibility.
        line_slider_label_y.visibility = not line_slider_label_y.visibility
        line_slider_y.set_visibility(line_slider_label_y.visibility)
        cnt = next(y_counter)
        # Same first-click guard as label_callback_x.
        if line_slider_label_y.visibility and cnt > 0:
            renderer.add(mem.slicer_curr_actor_y)
        else:
            renderer.rm(mem.slicer_curr_actor_y)
        iren.Render()
    line_slider_label_y.actor.AddObserver('LeftButtonPressEvent',
                                          label_callback_y,
                                          1.0)
    # Z-slice label; clicking it toggles the slider and actor visibility.
    line_slider_label_z = build_label(text="Z Slice")
    line_slider_label_z.visibility = True
    z_counter = itertools.count()
    def label_callback_z(obj, event):
        # Toggle both the label state and the slider visibility.
        line_slider_label_z.visibility = not line_slider_label_z.visibility
        line_slider_z.set_visibility(line_slider_label_z.visibility)
        cnt = next(z_counter)
        # Same first-click guard as label_callback_x.
        if line_slider_label_z.visibility and cnt > 0:
            renderer.add(mem.slicer_curr_actor_z)
        else:
            renderer.rm(mem.slicer_curr_actor_z)
        iren.Render()
    line_slider_label_z.actor.AddObserver('LeftButtonPressEvent',
                                          label_callback_z,
                                          1.0)
    opacity_slider_label = build_label(text="Opacity")
    volume_slider_label = build_label(text="Volume")
    picker_label = build_label(text='')
    double_slider_label = build_label(text='Colormap')
    def label_colormap_callback(obj, event):
        """Cycle to the next colormap and re-apply the current range."""
        if mem.slicer_colormap_cnt == len(mem.slicer_colormaps) - 1:
            mem.slicer_colormap_cnt = 0
        else:
            mem.slicer_colormap_cnt += 1
        cnt = mem.slicer_colormap_cnt
        mem.slicer_colormap = mem.slicer_colormaps[cnt]
        # Show the active colormap name on the label itself.
        double_slider_label.message = mem.slicer_colormap
        values = double_slider._values
        r1, r2 = values
        apply_colormap(r1, r2)
        iren.Render()
    double_slider_label.actor.AddObserver('LeftButtonPressEvent',
                                          label_colormap_callback,
                                          1.0)
    # volume_slider.on_right_mouse_button_released = change_volume2
    def label_opacity_callback(obj, event):
        """Toggle slice opacity between fully off and fully on."""
        # NOTE(review): the slider's range is 0.0-1.0 but the "on" branch
        # sets value to 100 - confirm the UI clamps/normalises this.
        if opacity_slider.value == 0:
            opacity_slider.value = 100
            opacity_slider.update()
            slicer_opacity = 1
        else:
            opacity_slider.value = 0
            opacity_slider.update()
            slicer_opacity = 0
        mem.slicer_curr_actor_x.opacity(slicer_opacity)
        mem.slicer_curr_actor_y.opacity(slicer_opacity)
        mem.slicer_curr_actor_z.opacity(slicer_opacity)
        iren.Render()
    opacity_slider_label.actor.AddObserver('LeftButtonPressEvent',
                                           label_opacity_callback,
                                           1.0)
    # Panel layout: taller panel for 4D data (extra volume-slider row).
    if data.ndim == 4:
        panel_size = (400, 400 + 100)
    if data.ndim == 3:
        panel_size = (400, 300 + 100)
    # NOTE(review): panel_size is unbound if data.ndim is neither 3 nor 4 -
    # confirm callers guarantee 3D/4D input.
    panel = ui.Panel2D(size=panel_size,
                       position=(850, 110),
                       color=(1, 1, 1),
                       opacity=0.1,
                       align="right")
    # Evenly spaced vertical anchor positions for the panel rows.
    ys = np.linspace(0, 1, 10)
    panel.add_element(line_slider_z, coords=(0.4, ys[1]))
    panel.add_element(line_slider_y, coords=(0.4, ys[2]))
    panel.add_element(line_slider_x, coords=(0.4, ys[3]))
    panel.add_element(opacity_slider, coords=(0.4, ys[4]))
    panel.add_element(double_slider, coords=(0.4, (ys[7] + ys[8])/2.))
    # The volume slider only makes sense for multi-volume (non-RGB) 4D data.
    if data.ndim == 4:
        if data.shape[-1] > 3:
            panel.add_element(volume_slider, coords=(0.4, ys[6]))
    panel.add_element(line_slider_label_z, coords=(0.1, ys[1]))
    panel.add_element(line_slider_label_y, coords=(0.1, ys[2]))
    panel.add_element(line_slider_label_x, coords=(0.1, ys[3]))
    panel.add_element(opacity_slider_label, coords=(0.1, ys[4]))
    panel.add_element(double_slider_label, coords=(0.1, (ys[7] + ys[8])/2.))
    if data.ndim == 4:
        if data.shape[-1] > 3:
            panel.add_element(volume_slider_label,
                              coords=(0.1, ys[6]))
    panel.add_element(picker_label, coords=(0.2, ys[5]))
    renderer.add(panel)
    # initialize colormap
    r1, r2 = value_range
    apply_colormap(r1, r2)
    return panel
| {
"content_hash": "9a4ee5c55723c693287fd53938b62828",
"timestamp": "",
"source": "github",
"line_count": 536,
"max_line_length": 78,
"avg_line_length": 34.509328358208954,
"alnum_prop": 0.5070551981402389,
"repo_name": "FrancoisRheaultUS/dipy",
"id": "db998b85f5747707db7abc98ade93607e6ed65ad",
"size": "18497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dipy/viz/panel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2932"
},
{
"name": "Makefile",
"bytes": "3686"
},
{
"name": "Python",
"bytes": "3246086"
}
],
"symlink_target": ""
} |
descr = """Python interface to UMFPACK sparse direct solver."""

# Package metadata consumed by setup_package() below.
DISTNAME = 'scikit-umfpack'
DESCRIPTION = 'Python interface to UMFPACK sparse direct solver.'
# Read the long description with a context manager so the file handle is
# closed deterministically (bare open().read() leaked the handle).
with open('README.rst') as _readme:
    LONG_DESCRIPTION = _readme.read()
MAINTAINER = 'Robert Cimrman'
MAINTAINER_EMAIL = 'cimrman3@ntc.zcu.cz'
URL = 'https://scikit-umfpack.github.io/scikit-umfpack'
LICENSE = 'BSD'
DOWNLOAD_URL = URL
VERSION = '0.3.2'
ISRELEASED = False
import sys
import os
import shutil
import subprocess
from distutils.command.clean import clean as Clean
def git_version():
    """Return the current git HEAD revision, or "Unknown" on failure."""
    def _run_git(cmd):
        # Build a minimal, locale-neutral environment for the subprocess.
        git_env = {key: os.environ[key]
                   for key in ('SYSTEMROOT', 'PATH') if key in os.environ}
        # LANGUAGE is used on win32
        git_env.update(LANGUAGE='C', LANG='C', LC_ALL='C')
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=git_env)
        return proc.communicate()[0]
    try:
        revision = _run_git(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')
    except OSError:
        revision = "Unknown"
    return revision
def get_version_info():
    """Return (FULLVERSION, GIT_REVISION) for the current checkout.

    Adding the git rev number needs to be done inside write_version_py(),
    otherwise the import of scikits.umfpack.version messes up the build
    under Python 3.
    """
    if os.path.exists('.git'):
        git_revision = git_version()
    elif os.path.exists('scikits/umfpack/version.py'):
        # must be a source distribution, use existing version file
        # load it as a separate module to not load scikits/umfpack/__init__.py
        # NOTE(review): the deprecated ``imp`` module was removed in
        # Python 3.12; importlib would be needed there.
        import imp
        version_module = imp.load_source('scikits.umfpack.version',
                                         'scikits/umfpack/version.py')
        git_revision = version_module.git_revision
    else:
        git_revision = "Unknown"
    full_version = VERSION
    if not ISRELEASED:
        full_version += '.dev0+' + git_revision[:7]
    return full_version, git_revision
def write_version_py(filename='scikits/umfpack/version.py'):
    """Generate the package's version.py from the current VERSION/git state.

    The file is rewritten on every build so the embedded git revision is
    always current.
    """
    cnt = """# THIS FILE IS GENERATED FROM scikit-umfpack SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
    version = full_version
"""
    FULLVERSION, GIT_REVISION = get_version_info()
    # ``with`` closes the file even if formatting fails (replaces the
    # manual try/finally + close()).
    with open(filename, 'w') as version_file:
        version_file.write(cnt % {'version': VERSION,
                                  'full_version': FULLVERSION,
                                  'git_revision': GIT_REVISION,
                                  'isrelease': str(ISRELEASED)})
###############################################################################
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
if len(set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
            'bdist_wininst', 'install_egg_info', 'build_sphinx',
            'egg_info', 'easy_install', 'upload', 'bdist_wheel',
            '--single-version-externally-managed',
            )).intersection(sys.argv)) > 0:
    import setuptools
    # Extra keyword arguments only understood by setuptools' setup().
    extra_setuptools_args = dict(
        zip_safe=False, # the package can run out of an .egg file
        include_package_data=True,
    )
else:
    extra_setuptools_args = dict()
###############################################################################
class CleanCommand(Clean):
    """``clean`` command that also removes build dirs and compiled files."""
    description = 'Remove build directories, and compiled file in the source tree'

    def run(self):
        Clean.run(self)
        if os.path.exists('build'):
            shutil.rmtree('build')
        compiled_suffixes = ('.so', '.pyd', '.dll', '.pyc')
        for dirpath, dirnames, filenames in os.walk('umfpack'):
            for fname in filenames:
                if fname.endswith(compiled_suffixes):
                    os.unlink(os.path.join(dirpath, fname))
            for dname in dirnames:
                if dname == '__pycache__':
                    shutil.rmtree(os.path.join(dirpath, dname))
###############################################################################
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration hook for the scikits.umfpack package."""
    # A stale MANIFEST confuses distutils' sdist file list.
    if os.path.exists('MANIFEST'): os.remove('MANIFEST')
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    # Avoid non-useful msg:
    # "Ignoring attempt to set 'name' (from ... "
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    config.add_subpackage('scikits.umfpack')
    config.add_data_files('scikits/__init__.py')
    config.add_data_files('MANIFEST.in')
    return config
def setup_package():
    """Assemble the distribution metadata and invoke ``setup``.

    NumPy is only required for commands that actually build the package;
    informational commands (--help, egg_info, ...) fall back to plain
    setuptools/distutils so installation via pip can bootstrap.
    """
    # Rewrite the version file every time
    write_version_py()

    cmdclass = {'clean': CleanCommand}
    try:
        from sphinx.setup_command import BuildDoc as SphinxBuildDoc

        class BuildDoc(SphinxBuildDoc):
            """Run in-place build before Sphinx doc build"""
            def run(self):
                ret = subprocess.call([sys.executable, sys.argv[0], 'build_ext', '-i'])
                if ret != 0:
                    raise RuntimeError("Building failed!")
                SphinxBuildDoc.run(self)

        cmdclass['build_sphinx'] = BuildDoc
    except ImportError:
        pass

    # Idiomatic membership test (PEP 8 / E713: use "not in").
    if 'sdist' not in sys.argv[1:]:
        try:
            from setuptools.command.test import test as TestCommand

            class NoseTestCommand(TestCommand):
                def finalize_options(self):
                    TestCommand.finalize_options(self)
                    self.test_args = []
                    self.test_suite = True

                def run_tests(self):
                    # Run nose ensuring that argv simulates running nosetests directly
                    ret = subprocess.call([sys.executable, sys.argv[0], 'build_ext', '-i'])
                    if ret != 0:
                        raise RuntimeError("Building failed!")
                    import nose
                    nose.run_exit(argv=['nosetests'])

            cmdclass['test'] = NoseTestCommand
        except ImportError:
            pass

    try:
        import numpy
        INSTALL_REQUIRES = [
            'numpy>=' + str(numpy.__version__),
            'scipy>=1.0.0rc1',
        ]
    except ImportError:
        INSTALL_REQUIRES = []

    metadata = dict(name=DISTNAME,
                    maintainer=MAINTAINER,
                    maintainer_email=MAINTAINER_EMAIL,
                    description=DESCRIPTION,
                    license=LICENSE,
                    url=URL,
                    version=VERSION,
                    download_url=DOWNLOAD_URL,
                    long_description=LONG_DESCRIPTION,
                    install_requires=INSTALL_REQUIRES,
                    classifiers=[
                        'Development Status :: 4 - Beta',
                        'Environment :: Console',
                        'Intended Audience :: Developers',
                        'Intended Audience :: Science/Research',
                        'License :: OSI Approved :: BSD License',
                        'Topic :: Scientific/Engineering',
                        'Programming Language :: C',
                        'Programming Language :: Python',
                        'Topic :: Software Development',
                        'Topic :: Scientific/Engineering',
                        'Operating System :: Microsoft :: Windows',
                        'Operating System :: POSIX',
                        'Operating System :: Unix',
                        'Operating System :: MacOS',
                    ],
                    platforms=['Linux', 'Mac OS-X', 'Windows'],
                    test_suite='nose.collector',
                    cmdclass=cmdclass,
                    **extra_setuptools_args)

    if (len(sys.argv) >= 2
            and ('--help' in sys.argv[1:] or sys.argv[1]
                 in ('--help-commands', 'egg_info', '--version', 'clean'))):
        # For these actions, NumPy is not required.
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Scikit when Numpy is not yet present in
        # the system.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup

        metadata['version'] = VERSION
    else:
        from numpy.distutils.core import setup

        metadata['configuration'] = configuration

    setup(**metadata)
# Script entry point: build/install when executed directly.
if __name__ == '__main__':
    setup_package()
| {
"content_hash": "f09ff3cff764132898ad72dc4b3f91ad",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 91,
"avg_line_length": 36.29838709677419,
"alnum_prop": 0.5383248167073984,
"repo_name": "rc/scikit-umfpack",
"id": "6425cb2652c918fcf331b32b9b04c35beb75bc2b",
"size": "9025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5575"
},
{
"name": "Python",
"bytes": "57664"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2012 STFC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author Will Rogers, Konrad Jopek
'''
from apel.db.records import Record, InvalidRecordException
from datetime import datetime, timedelta
from xml.dom.minidom import Document
from apel.common import parse_fqan
import time
WITHHELD_DN = 'withheld'
class JobRecord(Record):
    '''
    Class to represent one job record.
    It knows about the structure of the MySQL table and the message format.
    It stores its information in a dictionary self._record_content. The keys
    are in the same format as in the messages, and are case-sensitive.
    '''
    def __init__(self):
        '''Provide the necessary lists containing message information.'''
        Record.__init__(self)
        # Fields which are required by the message format.
        self._mandatory_fields = ["Site", "LocalJobId",
                                  "WallDuration", "CpuDuration",
                                  "StartTime", "EndTime"]
        # This list allows us to specify the order of lines when we construct records.
        self._msg_fields = ["Site", "SubmitHost", "MachineName", "Queue", "LocalJobId", "LocalUserId",
                            "GlobalUserName", "FQAN", "VO", "VOGroup", "VORole", "WallDuration",
                            "CpuDuration", "Processors", "NodeCount", "StartTime", "EndTime", "InfrastructureDescription", "InfrastructureType",
                            "MemoryReal", "MemoryVirtual", "ServiceLevelType",
                            "ServiceLevel"]
        # This list specifies the information that goes in the database.
        self._db_fields = ["Site", "SubmitHost", "MachineName", "Queue", "LocalJobId", "LocalUserId",
                           "GlobalUserName", "FQAN", "VO",
                           "VOGroup", "VORole", "WallDuration", "CpuDuration", "Processors",
                           "NodeCount", "StartTime", "EndTime", "InfrastructureDescription", "InfrastructureType", "MemoryReal",
                           "MemoryVirtual", "ServiceLevelType", "ServiceLevel"]
        # Not used in the message format, but required by the database.
        # self._fqan_fields = ["VO", "VOGroup", "VORole"]
        # Fields which are accepted but currently ignored.
        self._ignored_fields = ["SubmitHostType", "UpdateTime"]
        self._all_fields = self._msg_fields + self._ignored_fields
        # Fields which will have an integer stored in them
        self._int_fields = ["WallDuration", "CpuDuration", "Processors",
                            "NodeCount", "MemoryReal",
                            "MemoryVirtual"]
        # Fields parsed as floats and datetimes respectively.
        self._float_fields = ["ServiceLevel"]
        self._datetime_fields = ["StartTime", "EndTime"]
        # Acceptable values for the ServiceLevelType field, not case-sensitive
        self._valid_slts = ["si2k", "hepspec"]
    def _check_fields(self):
        '''
        Add extra checks to those made in every record.

        Parses the FQAN into VO/VOGroup/VORole, validates the service
        level pair and the start/end times, updating _record_content
        in place.
        '''
        # First, call the parent's version.
        Record._check_fields(self)
        # Extract the relevant information from the user fqan.
        # Keep the fqan itself as other methods in the class use it.
        if self._record_content['FQAN'] not in ('None', None):
            role, group, vo = parse_fqan(self._record_content['FQAN'])
            # We can't / don't put NULL in the database, so we use 'None'
            if role is None:
                role = 'None'
            if group is None:
                group = 'None'
            if vo is None:
                vo = 'None'
            self._record_content['VORole'] = role
            self._record_content['VOGroup'] = group
            # Confusing terminology from the CAR
            self._record_content['VO'] = vo
        # Check the ScalingFactor.
        slt = self._record_content['ServiceLevelType']
        sl = self._record_content['ServiceLevel']
        (slt, sl) = self._check_factor(slt, sl)
        self._record_content['ServiceLevelType'] = slt
        self._record_content['ServiceLevel'] = sl
        # Check the values of StartTime and EndTime
        self._check_start_end_times()
    def _check_start_end_times(self):
        '''Checks the values of StartTime and EndTime in _record_content.
        StartTime should be less than or equal to EndTime.
        Neither StartTime or EndTime should be zero.
        EndTime should not be in the future.
        This is merely factored out for simplicity.

        Raises InvalidRecordException on any violation.
        '''
        # NOTE(review): the zero-value check described above is not
        # implemented here - confirm it is enforced elsewhere.
        try:
            start = self._record_content['StartTime']
            end = self._record_content['EndTime']
            if end < start:
                raise InvalidRecordException("EndTime is before StartTime.")
            now = datetime.now()
            # add two days to prevent timezone problems
            tomorrow = now + timedelta(2)
            if end > tomorrow:
                raise InvalidRecordException("Epoch time " + str(end) + " is in the future.")
        except ValueError:
            raise InvalidRecordException("Cannot parse an integer from StartTime or EndTime.")
def _check_factor(self, sfu, sf):
'''
Check for the validity of the ScalingFactorUnit and ScalingFactor fields.
We accept neither field included or both. If only one of the fields is
included, it doesn't really make sense so we reject it.
We expect that:
- as ServiceLevel is a float field,
a null ScalingFactor is passed in as the null object None.
- as ServiceLevelType is a msg field,
a null ScalingFactorUnit is passed in as the string 'None'.
'''
if sf is None:
if sfu != 'None':
raise InvalidRecordException('Unit but not value supplied for ScalingFactor.')
else:
sfu = 'custom'
sf = 1
else: # sf is present
if sfu == 'None':
raise InvalidRecordException('Unit but not value supplied for ScalingFactor.')
else:
if sfu.lower() not in self._valid_slts:
raise InvalidRecordException('ScalingFactorUnit ' + sfu +
' not valid.')
return (sfu, sf)
    def get_ur(self, withhold_dns=False):
        '''
        Returns the JobRecord in CAR format. See
        https://twiki.cern.ch/twiki/bin/view/EMI/ComputeAccounting
        Namespace information is written only once per record, by dbunloader.

        If withhold_dns is True, the user DN is replaced by a placeholder.
        '''
        record_id = self.get_field('MachineName') + ' ' + self.get_field('LocalJobId') + ' ' + str(self.get_field('EndTime'))
        # Create the document directly
        doc = Document()
        ur = doc.createElement('urf:UsageRecord')
        # RecordIdentity: unique id plus creation timestamp.
        rec_id = doc.createElement('urf:RecordIdentity')
        rec_id.setAttribute('urf:createTime', datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
        rec_id.setAttribute('urf:recordId', record_id)
        ur.appendChild(rec_id)
        job_id = doc.createElement('urf:JobIdentity')
        local_job_id = doc.createElement('urf:LocalJobId')
        text = doc.createTextNode(self.get_field('LocalJobId'))
        local_job_id.appendChild(text)
        job_id.appendChild(local_job_id)
        ur.appendChild(job_id)
        # UserIdentity: DN, VO and FQAN attributes - all optional.
        user_id = doc.createElement('urf:UserIdentity')
        if self.get_field('GlobalUserName') is not None:
            if withhold_dns:
                dn = WITHHELD_DN
            else:
                dn = self.get_field('GlobalUserName')
            global_user_name = doc.createElement('urf:GlobalUserName')
            global_user_name.appendChild(doc.createTextNode(dn))
            global_user_name.setAttribute('urf:type', 'opensslCompat')
            user_id.appendChild(global_user_name)
        if self.get_field('VO') is not None:
            group = doc.createElement('urf:Group')
            group.appendChild(doc.createTextNode(self.get_field('VO')))
            user_id.appendChild(group)
        if self.get_field('FQAN') is not None:
            fqan = doc.createElement('urf:GroupAttribute')
            fqan.setAttribute('urf:type', 'FQAN')
            fqan.appendChild(doc.createTextNode(self.get_field('FQAN')))
            user_id.appendChild(fqan)
        if self.get_field('VOGroup') is not None:
            vogroup = doc.createElement('urf:GroupAttribute')
            vogroup.setAttribute('urf:type', 'vo-group')
            vogroup.appendChild(doc.createTextNode(self.get_field('VOGroup')))
            user_id.appendChild(vogroup)
        if self.get_field('VORole') is not None:
            vorole = doc.createElement('urf:GroupAttribute')
            vorole.setAttribute('urf:type', 'vo-role')
            vorole.appendChild(doc.createTextNode(self.get_field('VORole')))
            user_id.appendChild(vorole)
        if self.get_field('LocalUserId') is not None:
            local_user_id = doc.createElement('urf:LocalUserId')
            local_user_id.appendChild(doc.createTextNode(self.get_field('LocalUserId')))
            user_id.appendChild(local_user_id)
        ur.appendChild(user_id)
        # Status is always 'completed' for unloaded job records.
        status = doc.createElement('urf:Status')
        status.appendChild(doc.createTextNode('completed'))
        ur.appendChild(status)
        infra = doc.createElement('urf:Infrastructure')
        infra.setAttribute('urf:type', self.get_field('InfrastructureType'))
        ur.appendChild(infra)
        # Durations are ISO 8601 periods in seconds (PT<n>S).
        wall = doc.createElement('urf:WallDuration')
        wall.appendChild(doc.createTextNode('PT'+str(self.get_field('WallDuration'))+'S'))
        ur.appendChild(wall)
        cpu = doc.createElement('urf:CpuDuration')
        cpu.setAttribute('urf:usageType', 'all')
        cpu.appendChild(doc.createTextNode('PT'+str(self.get_field('CpuDuration'))+'S'))
        ur.appendChild(cpu)
        service_level = doc.createElement('urf:ServiceLevel')
        service_level.setAttribute('urf:type', self.get_field('ServiceLevelType'))
        service_level.appendChild(doc.createTextNode(str(self.get_field('ServiceLevel'))))
        ur.appendChild(service_level)
        # Optional numeric fields are only emitted when positive.
        if self.get_field('MemoryReal') > 0:
            pmem = doc.createElement('urf:Memory')
            pmem.setAttribute('urf:type', 'Physical')
            pmem.setAttribute('urf:storageUnit', 'KB')
            pmem.appendChild(doc.createTextNode(str(self.get_field('MemoryReal'))))
            ur.appendChild(pmem)
        if self.get_field('MemoryVirtual') > 0:
            vmem = doc.createElement('urf:Memory')
            vmem.setAttribute('urf:type', 'Shared')
            vmem.setAttribute('urf:storageUnit', 'KB')
            vmem.appendChild(doc.createTextNode(str(self.get_field('MemoryVirtual'))))
            ur.appendChild(vmem)
        if self.get_field('NodeCount') > 0:
            ncount = doc.createElement('urf:NodeCount')
            ncount.appendChild(doc.createTextNode(str(self.get_field('NodeCount'))))
            ur.appendChild(ncount)
        if self.get_field('Processors') > 0:
            procs = doc.createElement('urf:Processors')
            procs.appendChild(doc.createTextNode(str(self.get_field('Processors'))))
            ur.appendChild(procs)
        # NOTE(review): strftime('...Z') labels the timetuple as Zulu time -
        # confirm StartTime/EndTime are stored as UTC upstream.
        end = doc.createElement('urf:EndTime')
        end_text = time.strftime('%Y-%m-%dT%H:%M:%SZ', self.get_field('EndTime').timetuple())
        end.appendChild(doc.createTextNode(end_text))
        ur.appendChild(end)
        start = doc.createElement('urf:StartTime')
        start_text = time.strftime('%Y-%m-%dT%H:%M:%SZ', self.get_field('StartTime').timetuple())
        start.appendChild(doc.createTextNode(start_text))
        ur.appendChild(start)
        # Where the job ran.
        machine = doc.createElement('urf:MachineName')
        machine.appendChild(doc.createTextNode(self.get_field('MachineName')))
        ur.appendChild(machine)
        if self.get_field('SubmitHost') is not None:
            subhost = doc.createElement('urf:SubmitHost')
            subhost.appendChild(doc.createTextNode(self.get_field('SubmitHost')))
            ur.appendChild(subhost)
        queue = doc.createElement('urf:Queue')
        queue.appendChild(doc.createTextNode(str(self.get_field('Queue'))))
        ur.appendChild(queue)
        site = doc.createElement('urf:Site')
        site.appendChild(doc.createTextNode(self.get_field('Site')))
        ur.appendChild(site)
        doc.appendChild(ur)
        # We don't want the XML declaration, because the whole XML
        # document will be assembled by another part of the program.
        return doc.documentElement.toxml()
| {
"content_hash": "7ac198e8a525c5c1a4e1b92954c7b424",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 139,
"avg_line_length": 41.24683544303797,
"alnum_prop": 0.6123983427957649,
"repo_name": "stfc/apel",
"id": "29087a8c4acede93ff5d1f43b3db25cba8dce38a",
"size": "13034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apel/db/records/job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "466479"
},
{
"name": "Shell",
"bytes": "4089"
},
{
"name": "TSQL",
"bytes": "23113"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``is_always_visible`` flag to the ``Host`` model."""
    # Applies on top of the app's initial schema migration.
    dependencies = [
        ('program', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='host',
            name='is_always_visible',
            field=models.BooleanField(default=False, verbose_name='Is always visible'),
        ),
    ]
| {
"content_hash": "da1ff76a345f3b5802fac90828f60365",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 87,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.6058394160583942,
"repo_name": "nnrcschmdt/helsinki",
"id": "9abf2bdc3474a1e7a4f72c8ce794d90af18bde69",
"size": "435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "program/migrations/0002_host_is_always_visible.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8863"
},
{
"name": "HTML",
"bytes": "34626"
},
{
"name": "JavaScript",
"bytes": "60025"
},
{
"name": "Python",
"bytes": "97084"
}
],
"symlink_target": ""
} |
"""Fichier contenant le poste second."""
from . import Poste
class Second(Poste):
    """Class defining the "second" (first mate) post.

    Class attributes below give the post's name, authority level, point
    cost and the name of its parent post in the hierarchy.
    """
    nom = "second"
    autorite = 90
    points = 9
    nom_parent = "capitaine"
| {
"content_hash": "6f70e07ebe3d49f069bf3fa3a80a0e24",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 45,
"avg_line_length": 17.75,
"alnum_prop": 0.6291079812206573,
"repo_name": "vlegoff/tsunami",
"id": "b23b752631d2df0e6666e150e6682fdc919964a5",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/navigation/equipage/postes/second.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class CSMPlugin(object):
    """This is a base class for all plugins. Inheriting from this class is not mandatory,
    however the Plugin class must implement the `run` method.
    The object constructor must accept a single parameter which represents
    the :class:`csmpe.InstallContext` object.
    The Plugin class must also have the following attributes.
    """
    #: The string representing the name of the plugin.
    name = "Plugin Template"
    #: The set of strings representing the install phases during which the plugin will be dispatched.
    #: Empty set means that plugin will NEVER be executed. The currently supported values are:
    #: *Pre-Upgrade*, *Pre-Add*, *Add*, *Pre-Activate*, *Pre-Deactivate*, *Deactivate*,
    #: *Remove*, *Commit*
    phases = set()
    #: The set of strings representing the supported platforms by the plugin. Empty set means ANY platform.
    #: The currently supported values are: *ASR9K*, *CRS*, *NCS6K*
    platforms = set()
    #: The set of operating system type strings. The supported values are: *IOS*, *XR*, *eXR*, *XE*.
    #: Empty set means plugin will be executed regardless of the detected operating system.
    os = set()
    def __init__(self, ctx):
        """ This is a constructor of a plugin object. The constructor can be overridden by the plugin code.
        The CSM Plugin Engine passes the :class:`csmpe.InstallContext` object
        as an argument. This context object provides the API interface for the plugin including:
        - Device communication (using condoor)
        - CSM status and information update
        - Progress, error and status logging.
        :param ctx: The install context object :class:`csmpe.InstallContext`
        :return: None
        """
        # Subclasses access the install context through self.ctx.
        self.ctx = ctx
    @abc.abstractmethod
    def run(self):
        """
        This method is a entry point for Plugin Engine to be called when plugin is dispatched.
        Must be implemented by the plugin code.
        :param: None
        :return: None
        """
        # Abstract: the docstring is the whole body; concrete plugins
        # must override this method.
| {
"content_hash": "201a3d81cd8f2bea7dca4e394edf48cb",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 107,
"avg_line_length": 38.68518518518518,
"alnum_prop": 0.675442795595979,
"repo_name": "anushreejangid/csm-ut",
"id": "d019f4db4d594c8f8429143039654ce3fc911c5c",
"size": "3610",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "csmpe/plugins/base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "443003"
},
{
"name": "Shell",
"bytes": "419"
}
],
"symlink_target": ""
} |
"""Test the Bosch SHC config flow."""
from unittest.mock import PropertyMock, mock_open, patch
from boschshcpy.exceptions import (
SHCAuthenticationError,
SHCConnectionError,
SHCRegistrationError,
SHCSessionError,
)
from boschshcpy.information import SHCInformation
from homeassistant import config_entries, setup
from homeassistant.components.bosch_shc.config_flow import write_tls_asset
from homeassistant.components.bosch_shc.const import CONF_SHC_CERT, CONF_SHC_KEY, DOMAIN
from tests.common import MockConfigEntry
# Canned device settings used by the tests in this module.
MOCK_SETTINGS = {
    "name": "Test name",
    "device": {"mac": "test-mac", "hostname": "test-host"},
}
# Zeroconf discovery payload mimicking an SHC controller on the LAN.
DISCOVERY_INFO = {
    "host": "1.1.1.1",
    "port": 0,
    "hostname": "shc012345.local.",
    "type": "_http._tcp.local.",
    "name": "Bosch SHC [test-mac]._http._tcp.local.",
}
async def test_form_user(hass, mock_zeroconf):
    """Test the happy path of the user-initiated config flow."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Step 1: the user step renders an empty form.
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert result["errors"] == {}
    # Step 2: mock a successful mDNS lookup so the host is accepted and the
    # flow advances to the credentials step.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"host": "1.1.1.1"},
        )
    assert result2["type"] == "form"
    assert result2["step_id"] == "credentials"
    assert result2["errors"] == {}
    # Step 3: mock successful pairing and authentication.  TLS assets are
    # written to disk, so os.mkdir and builtins.open are patched out.
    with patch(
        "boschshcpy.register_client.SHCRegisterClient.register",
        return_value={
            "token": "abc:123",
            "cert": b"content_cert",
            "key": b"content_key",
        },
    ), patch("os.mkdir"), patch("builtins.open"), patch(
        "boschshcpy.session.SHCSession.authenticate"
    ) as mock_authenticate, patch(
        "homeassistant.components.bosch_shc.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {"password": "test"},
        )
        await hass.async_block_till_done()
    assert result3["type"] == "create_entry"
    assert result3["title"] == "shc012345"
    assert result3["data"] == {
        "host": "1.1.1.1",
        "ssl_certificate": hass.config.path(DOMAIN, CONF_SHC_CERT),
        "ssl_key": hass.config.path(DOMAIN, CONF_SHC_KEY),
        "token": "abc:123",
        # "123" is presumably the token suffix after ":" — confirm against
        # the config_flow implementation.
        "hostname": "123",
    }
    assert len(mock_authenticate.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_get_info_connection_error(hass, mock_zeroconf):
    """Test we handle connection error while fetching device info."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # An SHCConnectionError during the mDNS lookup must keep the user step
    # open and surface "cannot_connect".
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        side_effect=SHCConnectionError,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "host": "1.1.1.1",
            },
        )
    assert result2["type"] == "form"
    assert result2["step_id"] == "user"
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_get_info_exception(hass):
    """Test we handle unexpected exceptions while fetching device info."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Any non-SHC exception during the mDNS lookup maps to the generic
    # "unknown" error on the user form.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        side_effect=Exception,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "host": "1.1.1.1",
            },
        )
    assert result2["type"] == "form"
    assert result2["step_id"] == "user"
    assert result2["errors"] == {"base": "unknown"}
async def test_form_pairing_error(hass, mock_zeroconf):
    """Test we handle a registration failure during pairing."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Host discovery succeeds, advancing to the credentials step.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"host": "1.1.1.1"},
        )
    assert result2["type"] == "form"
    assert result2["step_id"] == "credentials"
    assert result2["errors"] == {}
    # Registration raising SHCRegistrationError keeps the credentials form
    # open with a "pairing_failed" error.
    with patch(
        "boschshcpy.register_client.SHCRegisterClient.register",
        side_effect=SHCRegistrationError(""),
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {"password": "test"},
        )
        await hass.async_block_till_done()
    assert result3["type"] == "form"
    assert result3["step_id"] == "credentials"
    assert result3["errors"] == {"base": "pairing_failed"}
async def test_form_user_invalid_auth(hass, mock_zeroconf):
    """Test we handle invalid auth after successful pairing."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Host discovery succeeds, advancing to the credentials step.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"host": "1.1.1.1"},
        )
    assert result2["type"] == "form"
    assert result2["step_id"] == "credentials"
    assert result2["errors"] == {}
    # Pairing succeeds, but the subsequent authenticate() raises an
    # SHCAuthenticationError -> "invalid_auth" on the credentials form.
    with patch(
        "boschshcpy.register_client.SHCRegisterClient.register",
        return_value={
            "token": "abc:123",
            "cert": b"content_cert",
            "key": b"content_key",
        },
    ), patch("os.mkdir"), patch("builtins.open"), patch(
        "boschshcpy.session.SHCSession.authenticate",
        side_effect=SHCAuthenticationError,
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {"password": "test"},
        )
        await hass.async_block_till_done()
    assert result3["type"] == "form"
    assert result3["step_id"] == "credentials"
    assert result3["errors"] == {"base": "invalid_auth"}
async def test_form_validate_connection_error(hass, mock_zeroconf):
    """Test we handle a connection error during credential validation."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Host discovery succeeds, advancing to the credentials step.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"host": "1.1.1.1"},
        )
    assert result2["type"] == "form"
    assert result2["step_id"] == "credentials"
    assert result2["errors"] == {}
    # Pairing succeeds but authenticate() raises SHCConnectionError ->
    # "cannot_connect" on the credentials form.
    with patch(
        "boschshcpy.register_client.SHCRegisterClient.register",
        return_value={
            "token": "abc:123",
            "cert": b"content_cert",
            "key": b"content_key",
        },
    ), patch("os.mkdir"), patch("builtins.open"), patch(
        "boschshcpy.session.SHCSession.authenticate",
        side_effect=SHCConnectionError,
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {"password": "test"},
        )
        await hass.async_block_till_done()
    assert result3["type"] == "form"
    assert result3["step_id"] == "credentials"
    assert result3["errors"] == {"base": "cannot_connect"}
async def test_form_validate_session_error(hass, mock_zeroconf):
    """Test we handle a session error during credential validation."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Host discovery succeeds, advancing to the credentials step.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"host": "1.1.1.1"},
        )
    assert result2["type"] == "form"
    assert result2["step_id"] == "credentials"
    assert result2["errors"] == {}
    # Pairing succeeds but authenticate() raises SHCSessionError ->
    # "session_error" on the credentials form.
    with patch(
        "boschshcpy.register_client.SHCRegisterClient.register",
        return_value={
            "token": "abc:123",
            "cert": b"content_cert",
            "key": b"content_key",
        },
    ), patch("os.mkdir"), patch("builtins.open"), patch(
        "boschshcpy.session.SHCSession.authenticate",
        side_effect=SHCSessionError(""),
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {"password": "test"},
        )
        await hass.async_block_till_done()
    assert result3["type"] == "form"
    assert result3["step_id"] == "credentials"
    assert result3["errors"] == {"base": "session_error"}
async def test_form_validate_exception(hass, mock_zeroconf):
    """Test we handle an unexpected exception during credential validation."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Host discovery succeeds, advancing to the credentials step.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"host": "1.1.1.1"},
        )
    assert result2["type"] == "form"
    assert result2["step_id"] == "credentials"
    assert result2["errors"] == {}
    # Pairing succeeds but authenticate() raises a generic Exception ->
    # the catch-all "unknown" error on the credentials form.
    with patch(
        "boschshcpy.register_client.SHCRegisterClient.register",
        return_value={
            "token": "abc:123",
            "cert": b"content_cert",
            "key": b"content_key",
        },
    ), patch("os.mkdir"), patch("builtins.open"), patch(
        "boschshcpy.session.SHCSession.authenticate",
        side_effect=Exception,
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {"password": "test"},
        )
        await hass.async_block_till_done()
    assert result3["type"] == "form"
    assert result3["step_id"] == "credentials"
    assert result3["errors"] == {"base": "unknown"}
async def test_form_already_configured(hass, mock_zeroconf):
    """Test the user flow aborts when the device is already configured."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # Pre-create an entry with the same unique_id ("test-mac") but a stale IP.
    entry = MockConfigEntry(
        domain="bosch_shc", unique_id="test-mac", data={"host": "0.0.0.0"}
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"host": "1.1.1.1"},
        )
    assert result2["type"] == "abort"
    assert result2["reason"] == "already_configured"
    # Test config entry got updated with latest IP
    assert entry.data["host"] == "1.1.1.1"
async def test_zeroconf(hass, mock_zeroconf):
    """Test the happy path of the zeroconf-discovery config flow."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # Discovery succeeds and the flow starts at the confirmation step.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            data=DISCOVERY_INFO,
            context={"source": config_entries.SOURCE_ZEROCONF},
        )
        assert result["type"] == "form"
        assert result["step_id"] == "confirm_discovery"
        assert result["errors"] == {}
        # The in-progress flow carries the device name as a title placeholder.
        context = next(
            flow["context"]
            for flow in hass.config_entries.flow.async_progress()
            if flow["flow_id"] == result["flow_id"]
        )
        assert context["title_placeholders"]["name"] == "shc012345"
        # Confirming the discovery advances to the credentials step.
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {},
        )
        assert result2["type"] == "form"
        assert result2["step_id"] == "credentials"
    # Pairing and authentication succeed; entry is created.
    with patch(
        "boschshcpy.register_client.SHCRegisterClient.register",
        return_value={
            "token": "abc:123",
            "cert": b"content_cert",
            "key": b"content_key",
        },
    ), patch("os.mkdir"), patch("builtins.open"), patch(
        "boschshcpy.session.SHCSession.authenticate",
    ), patch(
        "homeassistant.components.bosch_shc.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {"password": "test"},
        )
        await hass.async_block_till_done()
    assert result3["type"] == "create_entry"
    assert result3["title"] == "shc012345"
    assert result3["data"] == {
        "host": "1.1.1.1",
        "ssl_certificate": hass.config.path(DOMAIN, CONF_SHC_CERT),
        "ssl_key": hass.config.path(DOMAIN, CONF_SHC_KEY),
        "token": "abc:123",
        # "123" is presumably the token suffix after ":" — confirm against
        # the config_flow implementation.
        "hostname": "123",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_already_configured(hass, mock_zeroconf):
    """Test zeroconf discovery aborts when the device is already configured."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # Pre-create an entry with the same unique_id ("test-mac") but a stale IP.
    entry = MockConfigEntry(
        domain="bosch_shc", unique_id="test-mac", data={"host": "0.0.0.0"}
    )
    entry.add_to_hass(hass)
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            data=DISCOVERY_INFO,
            context={"source": config_entries.SOURCE_ZEROCONF},
        )
        assert result["type"] == "abort"
        assert result["reason"] == "already_configured"
        # Test config entry got updated with latest IP
        assert entry.data["host"] == "1.1.1.1"
async def test_zeroconf_cannot_connect(hass, mock_zeroconf):
    """Test zeroconf discovery aborts when the device is unreachable."""
    # A connection error during the mDNS lookup aborts the discovery flow.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info", side_effect=SHCConnectionError
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            data=DISCOVERY_INFO,
            context={"source": config_entries.SOURCE_ZEROCONF},
        )
        assert result["type"] == "abort"
        assert result["reason"] == "cannot_connect"
async def test_zeroconf_not_bosch_shc(hass, mock_zeroconf):
    """Test we filter out non-bosch_shc devices."""
    # A discovery whose name does not match the Bosch SHC pattern aborts.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        data={"host": "1.1.1.1", "name": "notboschshc"},
        context={"source": config_entries.SOURCE_ZEROCONF},
    )
    assert result["type"] == "abort"
    assert result["reason"] == "not_bosch_shc"
async def test_reauth(hass, mock_zeroconf):
    """Test the reauthentication flow updates the entry and re-pairs."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # Existing entry that needs reauthentication.
    mock_config = MockConfigEntry(
        domain=DOMAIN,
        unique_id="test-mac",
        data={
            "host": "1.1.1.1",
            "hostname": "test-mac",
            "ssl_certificate": "test-cert.pem",
            "ssl_key": "test-key.pem",
        },
        title="shc012345",
    )
    mock_config.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_REAUTH},
        data=mock_config.data,
    )
    assert result["type"] == "form"
    assert result["step_id"] == "reauth_confirm"
    # The user supplies a new host; discovery of it succeeds.
    with patch(
        "boschshcpy.session.SHCSession.mdns_info",
        return_value=SHCInformation,
    ), patch(
        "boschshcpy.information.SHCInformation.name",
        new_callable=PropertyMock,
        return_value="shc012345",
    ), patch(
        "boschshcpy.information.SHCInformation.unique_id",
        new_callable=PropertyMock,
        return_value="test-mac",
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"host": "2.2.2.2"},
        )
    assert result2["type"] == "form"
    assert result2["step_id"] == "credentials"
    assert result2["errors"] == {}
    # Re-pairing succeeds; the flow aborts with "reauth_successful" and the
    # existing entry is updated in place with the new host.
    with patch(
        "boschshcpy.register_client.SHCRegisterClient.register",
        return_value={
            "token": "abc:123",
            "cert": b"content_cert",
            "key": b"content_key",
        },
    ), patch("os.mkdir"), patch("builtins.open"), patch(
        "boschshcpy.session.SHCSession.authenticate"
    ), patch(
        "homeassistant.components.bosch_shc.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {"password": "test"},
        )
        await hass.async_block_till_done()
    assert result3["type"] == "abort"
    assert result3["reason"] == "reauth_successful"
    assert mock_config.data["host"] == "2.2.2.2"
    assert len(mock_setup_entry.mock_calls) == 1
async def test_tls_assets_writer(hass):
    """Test we write tls assets to correct location."""
    assets = {
        "token": "abc:123",
        "cert": b"content_cert",
        "key": b"content_key",
    }
    # write_tls_asset must write the decoded bytes as utf-8 text files under
    # the integration's directory inside the HA config path.
    with patch("os.mkdir"), patch("builtins.open", mock_open()) as mocked_file:
        write_tls_asset(hass, CONF_SHC_CERT, assets["cert"])
        mocked_file.assert_called_with(
            hass.config.path(DOMAIN, CONF_SHC_CERT), "w", encoding="utf8"
        )
        mocked_file().write.assert_called_with("content_cert")
        write_tls_asset(hass, CONF_SHC_KEY, assets["key"])
        mocked_file.assert_called_with(
            hass.config.path(DOMAIN, CONF_SHC_KEY), "w", encoding="utf8"
        )
        mocked_file().write.assert_called_with("content_key")
| {
"content_hash": "2a99a7d891990ea4da84aaf01c52ee26",
"timestamp": "",
"source": "github",
"line_count": 629,
"max_line_length": 88,
"avg_line_length": 32.426073131955484,
"alnum_prop": 0.5945773681113944,
"repo_name": "FreekingDean/home-assistant",
"id": "6d8ef9bd32ef4228e7788106c82881a027684ab8",
"size": "20396",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/bosch_shc/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""Processes a list of seeds; outputs the crash signatures for chunks of seeds."""
import argparse
import re
import sys
from pathlib import Path
from typing import List, Set, TextIO
from gfauto import signature_util, util
from gfauto.gflogging import log
from gfauto.util import check
def process_chunk(  # pylint: disable=too-many-locals;
    chunk_num: int, chunk: Set[str], log_files: List[Path], output_file: TextIO
) -> None:
    """Collect and emit the unique crash signatures for one chunk of seeds.

    Scans every file in |log_files|, keeps only logs whose "Iteration seed"
    header is in |chunk|, extracts the failing region of each log, and writes
    the sorted, de-duplicated signatures to |output_file| and the gfauto log.
    """
    log(f"\nChunk {chunk_num}:")
    output_file.write(f"\nChunk {chunk_num}:\n")
    unique_signatures: Set[str] = set()
    for log_file in log_files:
        with util.file_open_text(log_file, "r") as f:
            # The first line of each log is expected to be
            # "Iteration seed: <n>"; skip logs whose seed is not in |chunk|.
            first_line = f.readline()
            match = re.fullmatch(r"Iteration seed: (\d+)\n", first_line)
            assert match  # noqa
            seed = match.group(1)
            if seed not in chunk:
                continue
            lines = f.readlines()
            start_line = 0
            end_line = 0
            found_bug = False
            # The failure log is the region after the last "STATUS SUCCESS"
            # line and up to (and including the line before) the first
            # non-SUCCESS status line.
            for i, line in enumerate(lines):
                match = re.fullmatch(r"STATUS (\w+)\n", line)
                if not match:
                    continue
                status = match.group(1)
                if status == "SUCCESS":
                    start_line = i + 1
                    continue
                # Any status other than SUCCESS indicates a bug.
                found_bug = True
                end_line = i + 1
                break
            if not found_bug:
                continue
            # NOTE(review): |lines| still carry their trailing "\n", so this
            # join doubles newlines — presumably harmless for signature
            # extraction; confirm if exact log contents matter.
            failure_log = "\n".join(lines[start_line:end_line])
            signature = signature_util.get_signature_from_log_contents(failure_log)
            unique_signatures.add(signature)
    # Print the signatures.
    for signature in sorted(unique_signatures):
        log(signature)
        output_file.write(f"{signature}\n")
def main() -> None:
    """Split the seed file into 10 chunks and report crash signatures per chunk."""
    parser = argparse.ArgumentParser(description="Processes a seed file.")
    parser.add_argument(
        "seed_file", help="Seed file to process.",
    )
    parser.add_argument(
        "--out", help="Output file.", default="signatures_chunked.txt",
    )
    parsed_args = parser.parse_args(sys.argv[1:])
    seed_file: Path = Path(parsed_args.seed_file)
    output_file: Path = Path(parsed_args.out)
    # Get a list of all log files (searched in the current working directory).
    log_files: List[Path] = sorted(Path().glob("log_*.txt"))
    # Get chunks of seeds and call process_chunk.
    seeds: List[str] = util.file_read_text(seed_file).split()
    check(len(seeds) == 10_000, AssertionError("Expected 10,000 seeds."))
    with util.file_open_text(output_file, "w") as output:
        index = 0
        # Process the seeds as 10 consecutive chunks of 1,000.
        for chunk_num in range(0, 10):
            chunk: Set[str] = set()
            for _ in range(0, 1_000):
                chunk.add(seeds[index])
                index += 1
            process_chunk(chunk_num, chunk, log_files, output)
        # Sanity check: every seed was consumed exactly once.
        check(
            index == 10_000, AssertionError("Expected to have processed 10,000 seeds.")
        )
if __name__ == "__main__":
    main()
| {
"content_hash": "d0bcab6b78c1d7497ed57396e7c21034",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 87,
"avg_line_length": 29.294117647058822,
"alnum_prop": 0.5642570281124498,
"repo_name": "google/graphicsfuzz",
"id": "835d51f65314f6d99b97d875a10b2ccbe9654b23",
"size": "3611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gfauto/gfauto/process_seeds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21057"
},
{
"name": "Batchfile",
"bytes": "18712"
},
{
"name": "C",
"bytes": "1261"
},
{
"name": "C++",
"bytes": "112737"
},
{
"name": "CMake",
"bytes": "3664"
},
{
"name": "CSS",
"bytes": "6774"
},
{
"name": "Dockerfile",
"bytes": "4035"
},
{
"name": "GLSL",
"bytes": "570713"
},
{
"name": "HTML",
"bytes": "9966"
},
{
"name": "Java",
"bytes": "3314649"
},
{
"name": "JavaScript",
"bytes": "75538"
},
{
"name": "Python",
"bytes": "709540"
},
{
"name": "Shell",
"bytes": "62877"
},
{
"name": "Thrift",
"bytes": "7878"
}
],
"symlink_target": ""
} |
class Solution:
    def numberToWords(self, num: int) -> str:
        """Spell out a non-negative integer in English words (LeetCode 273).

        Examples: 0 -> "Zero", 123 -> "One Hundred Twenty Three",
        1000000 -> "One Million".
        """
        if num == 0:
            return 'Zero'
        # Words for 1..19 (index n - 1) and the tens 20..90 (index n // 10 - 2).
        small = ['One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten',
                 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
        tens = ['Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
        words = []

        def emit(n):
            # Peel off the largest named magnitude first: recurse on the
            # multiplier, append the magnitude label, then recurse on the
            # remainder.  Below 100, handle tens and ones directly.
            for magnitude, label in ((10 ** 9, 'Billion'), (10 ** 6, 'Million'),
                                     (1000, 'Thousand'), (100, 'Hundred')):
                if n >= magnitude:
                    emit(n // magnitude)
                    words.append(label)
                    emit(n % magnitude)
                    return
            if n >= 20:
                words.append(tens[n // 10 - 2])
                emit(n % 10)
            elif n >= 1:
                words.append(small[n - 1])

        emit(num)
        return ' '.join(words)
| {
"content_hash": "763cd4f3ce1ed90c8429ae7096b45bb9",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 121,
"avg_line_length": 41.88235294117647,
"alnum_prop": 0.4571629213483146,
"repo_name": "jiadaizhao/LeetCode",
"id": "74dc69c849c453fbac806e5a12349f5b0ce6b06f",
"size": "1424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0201-0300/0273-Integer to English Words/0273-Integer to English Words.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1140864"
},
{
"name": "Java",
"bytes": "34062"
},
{
"name": "Python",
"bytes": "758800"
},
{
"name": "Shell",
"bytes": "698"
},
{
"name": "TSQL",
"bytes": "774"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.