# Copyright (c) 2017, Alibaba Inc.
# All rights reserved.
#
# Author: Chen Ding <cnady.dc@alibaba-inc.com>
# Created: 2018/03/26
# Description:
# ==============================================================================
"""Tests for tensorflow.ops.embedding_variable."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.check_ops import assert_equal
from tensorflow.python.platform import googletest
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import kv_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import sparse_tensor
from tensorflow.core.framework.embedding import config_pb2
from tensorflow.python.training import ftrl
from tensorflow.python.training import adam
from tensorflow.python.training import adam_async
from tensorflow.python.training import adagrad
from tensorflow.python.training import adagrad_decay
from tensorflow.python.training import adagrad_decay_v2
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import training_util
from tensorflow.python.ops import variables
from tensorflow.contrib.layers.python.layers import embedding_ops as emb_ops
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import loader
class EmbeddingVariableTest(test_util.TensorFlowTestCase):
def testDynamicDimensionEmbeddingVariable(self):
print("testDynamicDimensionEmbeddingVariable")
with ops.device('/cpu:0'):
def runTestAdagradDecay(self, var, g):
if isinstance(var, kv_variable_ops.EmbeddingVariable):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
else:
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64), blocknums=[2,2,2,2,2,2])
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
initializer=init_ops.ones_initializer(dtypes.float32),
embedding_dim = 8,
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb1 = runTestAdagradDecay(self, emb_var, g)
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
var = variable_scope.get_dynamic_dimension_embedding_variable("var_dist",
embedding_block_dimension=4,
embedding_block_num=2,
initializer=init_ops.ones_initializer(dtypes.float32))
emb2 = runTestAdagradDecay(self, var, g)
for i in range(0, 6):
for j in range(0, 8):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testDynamicEmbeddingVariableForInitFromProto(self):
print("testDynamicEmbeddingVariableForInitFromProto")
embedding = variable_scope.get_dynamic_dimension_embedding_variable("var_dist",
embedding_block_dimension=4,
embedding_block_num=2,
initializer=init_ops.ones_initializer(dtypes.float32))
emb = embedding_ops.embedding_lookup(embedding, math_ops.cast([0,1,2,5,6,7], dtypes.int64), blocknums=[2,2,2,2,2,2])
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
graph = ops.get_default_graph()
meta_graph_def = saver_module.export_meta_graph()
ops.reset_default_graph()
with self.test_session() as sess:
res = saver_module.import_meta_graph(meta_graph_def)
def testEmbeddingVariableForInitFromProto(self):
print("testEmbeddingVariableForInitFromProto")
embedding = variable_scope.get_embedding_variable("var_dist",
embedding_dim=6,
initializer=init_ops.ones_initializer,
steps_to_live = 4,
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(embedding, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
graph = ops.get_default_graph()
meta_graph_def = saver_module.export_meta_graph()
ops.reset_default_graph()
with self.test_session() as sess:
res = saver_module.import_meta_graph(meta_graph_def)
def testEmbeddingVariableForLookupInt64(self):
print("testEmbeddingVariableForLookupInt64")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,-7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
def testEmbeddingVariableForLookupInt32(self):
print("testEmbeddingVariableForLookupInt32")
checkpoint_directory = self.get_temp_dir()
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
key_dtype=dtypes.int32,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,-7], dtypes.int32))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = adam.AdamOptimizer(0.01)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
saver = saver_module.Saver()
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
sess.run([train_op])
model_path = os.path.join(checkpoint_directory, "model.ckpt")
save_path = saver.save(sess, model_path, global_step=12345)
saver.restore(sess, save_path)
def testEmbeddingVariableForExport(self):
print("testEmbeddingVariableForExport")
ev_config = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=1))
var = variable_scope.get_embedding_variable("var_1", embedding_dim=3,
initializer=init_ops.ones_initializer(dtypes.float32), steps_to_live=10000, ev_option=ev_config)
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
init = variables.global_variables_initializer()
keys, values, versions, freqs = var.export()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
sess.run(emb)
sess.run(emb)
sess.run(emb)
fetches = sess.run([keys, values, versions, freqs])
print(fetches)
self.assertAllEqual([0, 1, 2, 5, 6, 7], fetches[0])
self.assertAllEqual([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], fetches[1])
self.assertAllEqual([0, 0, 0, 0, 0, 0], fetches[2])
self.assertAllEqual([1, 1, 1, 1, 1, 1], fetches[3])
def testEmbeddingVariableForGetShape(self):
print("testEmbeddingVariableForGetShape")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
shape = var.total_count()
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
sess.run([emb])
self.assertAllEqual([6, 3], sess.run(shape))
def testEmbeddingVariableForMultiHashAdd(self):
print("testEmbeddingVariableForMultiHashAdd")
with ops.device('/cpu:0'):
var1 = variable_scope.get_variable("var_1", shape=[5,6],
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2))
var2 = variable_scope.get_variable("var_2", shape=[3,6],
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2))
ids_Q = math_ops.cast([0//5, 1//5, 2//5 , 4//5, 6//5, 7//5],dtypes.int64)
ids_R = math_ops.cast([0%3, 1%3, 2%3 , 4%3, 6%3, 7%3],dtypes.int64)
emb1 = embedding_ops.embedding_lookup(var1, ids_Q)
emb2 = embedding_ops.embedding_lookup(var2, ids_R)
emb = math_ops.add(emb1, emb2)
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
ids = math_ops.cast([0, 1, 2, 4, 6, 7], dtypes.int64)
var_multi = variable_scope.get_multihash_variable("var_multi",
[[5,6],[3,6]],
complementary_strategy="Q-R",
initializer=init_ops.ones_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2)
)
emb_multi = embedding_ops.embedding_lookup(var_multi, ids)
fun_m = math_ops.multiply(emb_multi, 2.0, name='multiply')
loss_m = math_ops.reduce_sum(fun_m, name='reduce_sum')
gs_m = training_util.get_or_create_global_step()
opt_m = adagrad_decay.AdagradDecayOptimizer(0.1, gs_m)
g_v_m = opt_m.compute_gradients(loss_m)
train_op_m = opt_m.apply_gradients(g_v_m)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
sess.run([train_op, train_op_m])
val_list = sess.run([emb, emb_multi])
for i in range(ids.shape.as_list()[0]):
self.assertAllEqual(val_list[0][i], val_list[1][i])
def testEmbeddingVariableForMultiHashMul(self):
print("testEmbeddingVariableForMultiHashMul")
with ops.device('/cpu:0'):
var1 = variable_scope.get_variable("var_1", shape=[5,6],
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2))
var2 = variable_scope.get_variable("var_2", shape=[3,6],
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2))
ids_Q = math_ops.cast([0//5, 1//5, 2//5 , 4//5, 6//5, 7//5],dtypes.int64)
ids_R = math_ops.cast([0%3, 1%3, 2%3 , 4%3, 6%3, 7%3],dtypes.int64)
emb1 = embedding_ops.embedding_lookup(var1, ids_Q)
emb2 = embedding_ops.embedding_lookup(var2, ids_R)
emb = math_ops.multiply(emb1, emb2)
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
ids = math_ops.cast([0, 1, 2, 4, 6, 7], dtypes.int64)
var_multi = variable_scope.get_multihash_variable("var_multi",
[[5,6],[3,6]],
complementary_strategy="Q-R",
operation="mul",
initializer=init_ops.ones_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2)
)
emb_multi = embedding_ops.embedding_lookup(var_multi, ids)
fun_m = math_ops.multiply(emb_multi, 2.0, name='multiply')
loss_m = math_ops.reduce_sum(fun_m, name='reduce_sum')
gs_m = training_util.get_or_create_global_step()
opt_m = adagrad_decay.AdagradDecayOptimizer(0.1, gs_m)
g_v_m = opt_m.compute_gradients(loss_m)
train_op_m = opt_m.apply_gradients(g_v_m)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
sess.run([train_op, train_op_m])
val_list = sess.run([emb, emb_multi])
for i in range(ids.shape.as_list()[0]):
self.assertAllEqual(val_list[0][i], val_list[1][i])
def testEmbeddingVariableForMultiHashConcat(self):
print("testEmbeddingVariableForMultiHashConcat")
with ops.device('/cpu:0'):
var1 = variable_scope.get_variable("var_1", shape=[5,6],
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2))
var2 = variable_scope.get_variable("var_2", shape=[3,6],
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2))
ids_Q = math_ops.cast([0//5, 1//5, 2//5 , 4//5, 6//5, 7//5],dtypes.int64)
ids_R = math_ops.cast([0%3, 1%3, 2%3 , 4%3, 6%3, 7%3],dtypes.int64)
emb1 = embedding_ops.embedding_lookup(var1, ids_Q)
emb2 = embedding_ops.embedding_lookup(var2, ids_R)
emb = array_ops.concat([emb1, emb2], 1)
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
ids = math_ops.cast([0, 1, 2, 4, 6, 7], dtypes.int64)
var_multi = variable_scope.get_multihash_variable("var_multi",
[[5,6],[3,6]],
complementary_strategy="Q-R",
operation="concat",
initializer=init_ops.ones_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2)
)
emb_multi = embedding_ops.embedding_lookup(var_multi, ids)
fun_m = math_ops.multiply(emb_multi, 2.0, name='multiply')
loss_m = math_ops.reduce_sum(fun_m, name='reduce_sum')
gs_m = training_util.get_or_create_global_step()
opt_m = adagrad_decay.AdagradDecayOptimizer(0.1, gs_m)
g_v_m = opt_m.compute_gradients(loss_m)
train_op_m = opt_m.apply_gradients(g_v_m)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
sess.run([train_op, train_op_m])
val_list = sess.run([emb, emb_multi])
for i in range(ids.shape.as_list()[0]):
self.assertAllEqual(val_list[0][i], val_list[1][i])
def testEmbeddingVariableForSaveAndRestore(self):
print("testEmbeddingVariableForSaveAndRestore")
checkpoint_directory = self.get_temp_dir()
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
saver = saver_module.Saver(sharded=True)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb_ori = sess.run(emb)
emb_ori = sess.run(emb)
emb_ori = sess.run(emb)
emb_ori = sess.run(emb)
save_path = saver.save(sess, os.path.join(checkpoint_directory, "model.ckpt"), global_step=12345)
print(save_path)
for name, shape in checkpoint_utils.list_variables(checkpoint_directory):
print('loading... ', name, shape)
with self.test_session() as sess:
saver.restore(sess, os.path.join(checkpoint_directory, "model.ckpt-12345"))
self.assertAllEqual(emb_ori, sess.run(emb))
def testEmbeddingVariableForL2FeatureEvictionFromContribFeatureColumn(self):
print("testEmbeddingVariableForL2FeatureEvictionFromContribFeatureColumn")
checkpoint_directory = self.get_temp_dir()
evict = variables.L2WeightEvict(l2_weight_threshold=0.9)
columns = feature_column.sparse_column_with_embedding(
column_name="col_emb",
dtype=dtypes.int64,
ev_option = variables.EmbeddingVariableOption(evict_option=evict))
W = feature_column.embedding_column(sparse_id_column=columns,
dimension=3,
initializer=init_ops.ones_initializer(dtypes.float32),
combiner="mean")
ids = {}
ids["col_emb"] = sparse_tensor.SparseTensor(
indices=[[0,0],[1,0],[2,0],[3,0],[4,0],[5,0]],
values=math_ops.cast([0,0,0,1,1,2], dtypes.int64),
dense_shape=[6, 1])
emb= feature_column_ops.input_from_feature_columns(
columns_to_tensors=ids, feature_columns=[W])
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
saver = saver_module.Saver()
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb_ori = sess.run([emb, train_op])
save_path = saver.save(sess, os.path.join(checkpoint_directory, "model1.ckpt"), global_step=12345)
with self.test_session() as sess:
saver.restore(sess, os.path.join(checkpoint_directory, "model1.ckpt-12345"))
emb_right = [[0.8282884, 0.8282884, 0.8282884],
[0.8282884, 0.8282884, 0.8282884],
[0.8282884, 0.8282884, 0.8282884],
[0.7927219, 0.7927219, 0.7927219],
[0.7927219, 0.7927219, 0.7927219],
[1.0, 1.0, 1.0]]
emb_ori = sess.run(emb)
for i in range(6):
for j in range(3):
self.assertAlmostEqual(emb_ori[i][j], emb_right[i][j])
def testEmbeddingVariableForL2FeatureEviction(self):
print("testEmbeddingVariableForL2FeatureEviction")
checkpoint_directory = self.get_temp_dir()
evict = variables.L2WeightEvict(l2_weight_threshold=0.9)
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
ev_option = variables.EmbeddingVariableOption(evict_option=evict))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,0,0,1,1,2], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
saver = saver_module.Saver()
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb_ori = sess.run([emb, train_op])
save_path = saver.save(sess, os.path.join(checkpoint_directory, "model1.ckpt"), global_step=12345)
#for name, shape in checkpoint_utils.list_variables(checkpoint_directory):
# print('loading... ', name, shape)
with self.test_session() as sess:
saver.restore(sess, os.path.join(checkpoint_directory, "model1.ckpt-12345"))
emb_right = [[0.8282884, 0.8282884, 0.8282884],
[0.8282884, 0.8282884, 0.8282884],
[0.8282884, 0.8282884, 0.8282884],
[0.7927219, 0.7927219, 0.7927219],
[0.7927219, 0.7927219, 0.7927219],
[1.0, 1.0, 1.0]]
emb_ori = sess.run(emb)
for i in range(6):
for j in range(3):
self.assertAlmostEqual(emb_ori[i][j], emb_right[i][j])
def testEmbeddingVariableForSparseColumnSharedEmbeddingCol(self):
columns_list=[]
columns_list.append(feature_column.sparse_column_with_embedding(column_name="col_emb", dtype=dtypes.string))
W = feature_column.shared_embedding_columns(sparse_id_columns=columns_list,
dimension=3,
initializer=init_ops.ones_initializer(dtypes.float32),
shared_embedding_name="xxxxx_shared")
ids={}
ids["col_emb"] = sparse_tensor.SparseTensor(indices=[[0,0],[1,0],[2,0],[3,0],[4,0]], values=["aaaa","bbbbb","ccc","4nn","5b"], dense_shape=[5, 5])
emb = feature_column_ops.input_from_feature_columns(columns_to_tensors=ids, feature_columns=W)
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run(init)
print("init global done")
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
def testEmbeddingVariableForFeatureFilterFromContribFeatureColumn(self):
print("testEmbeddingVariableForFeatureFilterFromContribFeatureColumn")
columns = feature_column.sparse_column_with_embedding(column_name="col_emb", dtype=dtypes.int64,
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=3)))
W = feature_column.embedding_column(sparse_id_column=columns,
dimension=3,
initializer=init_ops.ones_initializer(dtypes.float32))
ids={}
ids["col_emb"] = sparse_tensor.SparseTensor(indices=[[0,0],[1,0],[2,0],[3,0],[4,0],[5,0],[6,0],[7,0],[8,0],[9,0]], values=math_ops.cast([1,1,1,1,2,2,2,3,3,4], dtypes.int64), dense_shape=[10, 1])
emb = feature_column_ops.input_from_feature_columns(columns_to_tensors=ids, feature_columns=[W])
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
for val1 in emb1.tolist():
for val in val1:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for index, val1 in enumerate(emb1.tolist()):
if index < 7:
for val in val1:
self.assertNotEqual(val, 1.0)
else:
for val in val1:
self.assertEqual(val, 1.0)
def testEmbeddingVariableForSparseColumnEmbeddingCol(self):
columns = feature_column.sparse_column_with_embedding(column_name="col_emb", dtype=dtypes.int64)
W = feature_column.embedding_column(sparse_id_column=columns,
dimension=3,
initializer=init_ops.ones_initializer(dtypes.float32))
ids={}
ids["col_emb"] = sparse_tensor.SparseTensor(indices=[[0,0],[1,1],[2,2],[3,3],[4,4]], values=math_ops.cast([1,2,3,4,5], dtypes.int64), dense_shape=[5, 4])
emb = feature_column_ops.input_from_feature_columns(columns_to_tensors=ids, feature_columns=[W])
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run(init)
print("init global done")
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
def testEmbeddingVariableForShrinkNone(self):
print("testEmbeddingVariableForShrink")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
steps_to_live = 5,
initializer=init_ops.ones_initializer(dtypes.float32))
ids = array_ops.placeholder(dtype=dtypes.int64, name='ids')
emb = embedding_ops.embedding_lookup(var, ids)
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
gs = training_util.get_or_create_global_step()
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
for i in range(10):
print(sess.run([emb, train_op,loss], feed_dict={'ids:0': 2*i}))
def testEmbeddingVariableForWeightedSumFromFeatureColumn(self):
print("testEmbeddingVariableForWeightedSumFromFeatureColumn")
columns_list=[]
columns_list.append(feature_column.sparse_column_with_embedding(column_name="col_emb", dtype=dtypes.string))
ids={}
ids["col_emb"] = sparse_tensor.SparseTensor(indices=[[0,0],[1,0],[2,0],[3,0],[4,0]], values=["aaaa","bbbbb","ccc","4nn","5b"], dense_shape=[5, 5])
emb, _, _ = feature_column_ops.weighted_sum_from_feature_columns(columns_to_tensors=ids, feature_columns=columns_list, num_outputs=2)
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run(init)
print("init global done")
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
def testEmbeddingVariableForBloomFilterInt64(self):
print("testEmbeddingVariableForBloomFilterInt64")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CBFFilter(
filter_freq=3,
max_element_size = 5,
false_positive_probability = 0.01)),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForBloomFilterInt32(self):
print("testEmbeddingVariableForBloomFilterInt32")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CBFFilter(
filter_freq=3,
max_element_size = 5,
false_positive_probability = 0.01,
counter_type = dtypes.uint32
)),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForBloomFilterInt8(self):
print("testEmbeddingVariableForBloomFilterInt8")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CBFFilter(
filter_freq=3,
max_element_size = 5,
false_positive_probability = 0.01,
counter_type = dtypes.uint8
)),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForBloomFilterInt16(self):
print("testEmbeddingVariableForBloomFilterInt16")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CBFFilter(
filter_freq=3,
max_element_size = 5,
false_positive_probability = 0.01,
counter_type = dtypes.uint16
)),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForAdagradDecayFilter(self):
print("testEmbeddingVariableForAdagradDecayFilter")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=3)),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForFtrlFilter(self):
print("testEmbeddingVariableForFtrlFilter")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=3)),
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
#var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForAdamAsyncFilter(self):
print("testEmbeddingVariableForAdamAsynsFilter")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=3)),
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adam_async.AdamAsyncOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForGradientDescentFilter(self):
print("testEmbeddingVariableForGradientDescentFilter")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=3)),
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = gradient_descent.GradientDescentOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForAdagradDecayV2Filter(self):
print("testEmbeddingVariableForAdagradDecayV2Filter")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=3)),
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay_v2.AdagradDecayOptimizerV2(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForAdamFilter(self):
print("testEmbeddingVariableForAdamFilter")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=3)),
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adam.AdamOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertEqual(val, 1.0)
emb1, top, l = sess.run([emb, train_op, loss])
for val in emb1.tolist()[0]:
self.assertNotEqual(val, 1.0)
def testEmbeddingVariableForGradientDescent(self):
print("testEmbeddingVariableForGradientDescent")
with ops.device('/cpu:0'):
def runTestGradientDescent(self, var):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = gradient_descent.GradientDescentOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestGradientDescent(self, emb_var)
emb2 = runTestGradientDescent(self, var)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForAdagrad(self):
print("testEmbeddingVariableForAdagrad")
with ops.device('/cpu:0'):
def runTestAdagrad(self, var):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad.AdagradOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var)
emb2 = runTestAdagrad(self, var)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForAdagradDecay(self):
print("testEmbeddingVariableForAdagradDecay")
with ops.device('/cpu:0'):
def runTestAdagradDecay(self, var):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
emb_var = variable_scope.get_embedding_variable("var_1",
initializer=init_ops.ones_initializer(dtypes.float32),
embedding_dim = 3,
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagradDecay(self, emb_var)
emb2 = runTestAdagradDecay(self, var)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForAdagradDecayV2(self):
print("testEmbeddingVariableForAdagradDecayV2")
with ops.device('/cpu:0'):
def runTestAdagradDecayV2(self, var):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay_v2.AdagradDecayOptimizerV2(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagradDecayV2(self, emb_var)
emb2 = runTestAdagradDecayV2(self, var)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForAdam(self):
print("testEmbeddingVariableForAdam")
with ops.device('/cpu:0'):
def runTestAdam(self, var):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adam.AdamOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
var = variable_scope.get_variable("var_2", shape=[8, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdam(self, emb_var)
emb2 = runTestAdam(self, var)
print(emb1.tolist())
print(emb2.tolist())
for i in range(0, 6):
for j in range(0, 3):
self.assertAlmostEqual(emb1.tolist()[i][j], emb2.tolist()[i][j], delta=1e-05)
def testEmbeddingVariableForAdamAsync(self):
print("testEmbeddingVariableForAdamAsync")
with ops.device('/cpu:0'):
def runTestAdamAsync(self, var):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adam_async.AdamAsyncOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=2))
var = variable_scope.get_variable("var_2", shape=[8, 3],
initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdamAsync(self, emb_var)
emb2 = runTestAdamAsync(self, var)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForFtrl(self):
print("testEmbeddingVariableForFtrl")
with ops.device('/cpu:0'):
def runTestAdam(self, var):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
emb_var = variable_scope.get_embedding_variable("var_1", embedding_dim=3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdam(self, emb_var)
emb2 = runTestAdam(self, var)
#for i in range(0, 6):
# for j in range(0, 3):
# self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForAdagradDecayStep(self):
print("testEmbeddingVariableForAdagradDecayStep")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay_v2.AdagradDecayOptimizerV2(0.1, gs, accumulator_decay_step=2)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v, global_step=gs)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
self.assertEqual(36.0, sess.run([emb, train_op, loss])[2])
self.assertAlmostEqual(32.444176, sess.run([emb, train_op, loss])[2], delta=1e-05)
self.assertAlmostEqual(29.847788, sess.run([emb, train_op, loss])[2], delta=1e-05)
self.assertAlmostEqual(27.74195 , sess.run([emb, train_op, loss])[2], delta=1e-05)
self.assertAlmostEqual(25.852505, sess.run([emb, train_op, loss])[2], delta=1e-05)
def testEmbeddingVariableRestoreSavedModel(self):
checkpoint_directory = self.get_temp_dir() + "/save_model"
print("testEmbeddingVariableRestoreSavedModel")
# build graph
columns_list=[]
columns_list.append(feature_column.sparse_column_with_embedding(column_name="col_emb", dtype=dtypes.string))
W = feature_column.shared_embedding_columns(sparse_id_columns=columns_list,
dimension=3,
initializer=init_ops.ones_initializer(dtypes.float32),
shared_embedding_name="xxxxx_shared")
ids={}
ids["col_emb"] = sparse_tensor.SparseTensor(indices=[[0,0],[1,0],[2,0]], values=["aaaa","bbbbb","ccc"], dense_shape=[3, 5])
emb = feature_column_ops.input_from_feature_columns(columns_to_tensors=ids, feature_columns=W)
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
gs = training_util.get_or_create_global_step()
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run(init)
builder = saved_model_builder.SavedModelBuilder(checkpoint_directory)
builder.add_meta_graph_and_variables(sess, ['tag_string'])
builder.save()
# load savedmodel
with self.test_session() as sess:
loader.load(sess, ['tag_string'], checkpoint_directory)
def testEmbeddingVariableForGeneralConstInitializer(self):
print("testEmbeddingVariableForGeneralConstInitializer")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1,6], dtypes.int64))
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb_val = sess.run(emb)
for i in xrange(2):
for j in xrange(3):
self.assertAlmostEqual(1.0, emb_val[i][j], delta=1e-05)
def testEmbeddingVariableForGeneralRandomInitializer(self):
print("testEmbeddingVariableForGeneralRandomInitializer")
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
#initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1,6], dtypes.int64))
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb_val = sess.run(emb)
print(emb_val)
for i in xrange(3):
self.assertNotEqual(emb_val[0][i], emb_val[1][i])
def testEmbeddingVariableForHTPartitionNum(self):
print("testEmbeddingVariableForHTPartitionNum")
ev_option = variables.EmbeddingVariableOption(ht_partition_num=20)
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4),
ev_option=ev_option)
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,-7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
print(sess.run([emb, train_op,loss]))
def testEmbeddingVariableForLayout(self):
print("testEmbeddingVariableForLayout")
def runTestAdagrad(self, var, g):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad.AdagradOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var, g)
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1),
steps_to_live=5)
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var, g)
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1),
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=5)))
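      # With filter_freq=5, ids seen fewer than five times keep the default embedding
      # (ones) and receive no optimizer update, so every fetched row should still be 1.0.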
emb1 = runTestAdagrad(self, emb_var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], 1.0)
def testEVInitializerWithKeyFetch(self):
print("testEVInitializerWithKeyFetch")
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
var = variable_scope.get_variable("var", shape=[8,3],
initializer=init_ops.glorot_uniform_initializer(seed = 1))
init_opt = variables.InitializerOption(initializer=init_ops.glorot_uniform_initializer(seed = 1),
default_value_dim=8)
ev_option = variables.EmbeddingVariableOption(init_option=init_opt)
emb_var = variable_scope.get_embedding_variable("emb_var", embedding_dim=3,
ev_option=ev_option)
var_emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,3,4,5,6,7], dtypes.int64))
emb_emb = embedding_ops.embedding_lookup(emb_var, math_ops.cast([0,1,2,5,6,7,8,9,10], dtypes.int64))
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1 = sess.run(var_emb)
emb2 = sess.run(emb_emb)
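        # The EV draws default values from an 8-row table (default_value_dim=8) built with
        # the same glorot initializer as the dense variable, indexed by id % 8, so ids
        # 8, 9 and 10 are expected to reuse rows 0, 1 and 2.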
self.assertListEqual(emb1.tolist()[0], emb2.tolist()[0])
self.assertListEqual(emb1.tolist()[1], emb2.tolist()[1])
self.assertListEqual(emb1.tolist()[2], emb2.tolist()[2])
self.assertListEqual(emb1.tolist()[5], emb2.tolist()[3])
self.assertListEqual(emb1.tolist()[6], emb2.tolist()[4])
self.assertListEqual(emb1.tolist()[7], emb2.tolist()[5])
self.assertListEqual(emb1.tolist()[0], emb2.tolist()[6])
self.assertListEqual(emb1.tolist()[1], emb2.tolist()[7])
self.assertListEqual(emb1.tolist()[2], emb2.tolist()[8])
def testEVInitializerWithCounterFeatureFilter(self):
def testembedding(emb1, emb2):
is_match = 0
for i in range(8):
for j in range(3):
if emb1.tolist()[i][j] != emb2.tolist()[3][j]:
break
if j == 2:
is_match = 1
return is_match
print("testEVInitializerWithCounterFeatureFilter")
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
var = variable_scope.get_variable("var", shape=[8,3],
initializer=init_ops.glorot_uniform_initializer(seed = 1))
counter_filter_option=variables.CounterFilter(filter_freq=3)
init_opt = variables.InitializerOption(initializer=init_ops.glorot_uniform_initializer(seed = 1),
default_value_dim=8)
ev_option = variables.EmbeddingVariableOption(init_option=init_opt, filter_option=counter_filter_option)
emb_var = variable_scope.get_embedding_variable("emb_var", embedding_dim=3,
ev_option=ev_option)
var_emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,3,4,5,6,7], dtypes.int64))
emb_emb = embedding_ops.embedding_lookup(emb_var, math_ops.cast([3], dtypes.int64))
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
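        # No optimizer runs in this test: each lookup of id 3 returns its default value,
        # which with default_value_dim=8 is row id % 8 == 3 of the same glorot table, so
        # the repeated lookups are expected to keep matching row 3 of the dense variable.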
emb1 = sess.run(var_emb)
emb2 = sess.run(emb_emb)
self.assertListEqual(emb1.tolist()[3], emb2.tolist()[0])
emb2 = sess.run(emb_emb)
self.assertListEqual(emb1.tolist()[3], emb2.tolist()[0])
emb2 = sess.run(emb_emb)
self.assertListEqual(emb1.tolist()[3], emb2.tolist()[0])
emb2 = sess.run(emb_emb)
self.assertListEqual(emb1.tolist()[3], emb2.tolist()[0])
def testEVInitializerWithBloomFeatureFilter(self):
def testembedding(emb1, emb2):
is_match = 0
for i in range(8):
for j in range(3):
if emb1.tolist()[i][j] != emb2.tolist()[0][j]:
break
if j == 2:
is_match = 1
return is_match
print("testEVInitializerWithBloomFeatureFilter")
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
var = variable_scope.get_variable("var", shape=[8,3],
initializer=init_ops.glorot_uniform_initializer(seed = 1))
bloom_filter_option=variables.CBFFilter(
filter_freq=3,
max_element_size = 10,
false_positive_probability = 0.01)
init_opt = variables.InitializerOption(initializer=init_ops.glorot_uniform_initializer(seed = 1),
default_value_dim=8)
ev_option = variables.EmbeddingVariableOption(init_option=init_opt, filter_option=bloom_filter_option)
emb_var = variable_scope.get_embedding_variable("emb_var", embedding_dim=3,
ev_option=ev_option)
var_emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,3,4,5,6,7], dtypes.int64))
emb_emb = embedding_ops.embedding_lookup(emb_var, math_ops.cast([3], dtypes.int64))
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1 = sess.run(var_emb)
emb2 = sess.run(emb_emb)
self.assertListEqual(emb1.tolist()[3], emb2.tolist()[0])
emb2 = sess.run(emb_emb)
self.assertListEqual(emb1.tolist()[3], emb2.tolist()[0])
emb2 = sess.run(emb_emb)
self.assertListEqual(emb1.tolist()[3], emb2.tolist()[0])
emb2 = sess.run(emb_emb)
self.assertListEqual(emb1.tolist()[3], emb2.tolist()[0])
def testEVInitializer(self):
def runTest(self, var, g):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad.AdagradOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op, loss])
r, _, _ = sess.run([emb, train_op, loss])
r, _, _ = sess.run([emb, train_op, loss])
r, _, _ = sess.run([emb, train_op, loss])
return r
print("testEVInitializer")
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
init = variables.InitializerOption(default_value_dim=8192)
ev_option = variables.EmbeddingVariableOption(init_option = init)
emb_var = variable_scope.get_embedding_variable("emb_var", embedding_dim = 6,
initializer=init_ops.glorot_uniform_initializer(seed = 3),
ev_option = ev_option)
var = variable_scope.get_variable("var", shape=[8192, 6],
initializer=init_ops.glorot_uniform_initializer(seed = 3))
emb1 = runTest(self, emb_var, g)
emb2 = runTest(self, var, g)
for i in range(0, 6):
for j in range(0, 6):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForDRAM(self):
print("testEmbeddingVariableForDRAM")
def runTestAdagrad(self, var, g):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,1,2,5,6,7], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad.AdagradOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.DRAM)))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var, g)
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForLEVELDBWithAdagrad(self):
print("testEmbeddingVariableForLEVELDBWithAdagrad")
db_directory = self.get_temp_dir()
def runTestAdagrad(self, var, g):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1, 1, 1, 2, 2, 3], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad.AdagradOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
print(r)
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.LEVELDB,
storage_path=db_directory)))
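      # StorageType.LEVELDB keeps the embedding values in a LevelDB database under
      # storage_path; training results are expected to match the in-memory dense baseline.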
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var, g)
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForLEVELDBwithFilter(self):
print("testEmbeddingVariableForLEVELDBwithFilter")
db_directory = self.get_temp_dir()
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
ev_option = variables.EmbeddingVariableOption(filter_option=variables.CounterFilter(filter_freq=3),
storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.LEVELDB,
storage_path=db_directory)),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
emb1, top, l = sess.run([emb, train_op, loss])
def testEmbeddingVariableForLEVELDBWithGradientDescent(self):
print("testEmbeddingVariableForLEVELDBWithGradientDescent")
db_directory = self.get_temp_dir()
def runTestAdagrad(self, var, g):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1, 1, 1, 2, 2, 3], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = gradient_descent.GradientDescentOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.LEVELDB,
storage_path=db_directory)))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var, g)
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForLEVELDBWithAdam(self):
print("testEmbeddingVariableForLEVELDBWithAdam")
db_directory = self.get_temp_dir()
def runTestAdagrad(self, var, g):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1, 1, 1, 2, 2, 3], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adam.AdamOptimizer(0.01)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.LEVELDB,
storage_path=db_directory)))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var, g)
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForLEVELDBWithAdamAsync(self):
print("testEmbeddingVariableForLEVELDBWithAdamAsync")
db_directory = self.get_temp_dir()
def runTestAdagrad(self, var, g):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1, 1, 1, 2, 2, 3], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adam_async.AdamAsyncOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.LEVELDB,
storage_path=db_directory)))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var, g)
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForLEVELDBWithAdagradDecay(self):
print("testEmbeddingVariableForLEVELDBWithAdagradDecay")
db_directory = self.get_temp_dir()
def runTestAdagrad(self, var, g):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1, 1, 1, 2, 2, 3], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay.AdagradDecayOptimizer(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.LEVELDB,
storage_path=db_directory)))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var, g)
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testEmbeddingVariableForLEVELDBWithAdagradDecayV2(self):
print("testEmbeddingVariableForLEVELDBWithAdagradDecayV2")
db_directory = self.get_temp_dir()
def runTestAdagrad(self, var, g):
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1, 1, 1, 2, 2, 3], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay_v2.AdagradDecayOptimizerV2(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=4),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.LEVELDB,
storage_path=db_directory)))
var = variable_scope.get_variable("var_2", shape=[100, 3], initializer=init_ops.ones_initializer(dtypes.float32))
emb1 = runTestAdagrad(self, emb_var, g)
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
def testLevelDBCheckpoint(self):
db_directory = self.get_temp_dir()
checkpoint_directory = self.get_temp_dir()
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.LEVELDB,
storage_path=db_directory)))
emb = embedding_ops.embedding_lookup(emb_var, math_ops.cast([1, 1, 1, 2, 2, 3], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay_v2.AdagradDecayOptimizerV2(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
saver = saver_module.Saver()
model_path = os.path.join(checkpoint_directory, "model.ckpt")
with self.test_session() as sess:
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
saver.save(sess, model_path)
r, _ = sess.run([emb, loss])
for name, shape in checkpoint_utils.list_variables(model_path):
if name == "var_1-values":
ckpt_value = checkpoint_utils.load_variable(model_path, name)
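          # The lookup ids [1, 1, 1, 2, 2, 3] dedupe to three stored rows; r[0], r[3] and
          # r[5] are the first occurrences of ids 1, 2 and 3 in the fetched batch.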
for j in range(0, 3):
self.assertEqual(ckpt_value.tolist()[0][j], r[0][j])
self.assertEqual(ckpt_value.tolist()[1][j], r[3][j])
self.assertEqual(ckpt_value.tolist()[2][j], r[5][j])
with self.test_session() as sess:
saver.restore(sess, model_path)
r1, _, _ = sess.run([emb, train_op,loss])
for i in range(0, 6):
for j in range(0, 3):
self.assertEqual(r[i][j], r1.tolist()[i][j])
def testEmbeddingVariableForSaveFreq(self):
db_directory = self.get_temp_dir()
checkpoint_directory = self.get_temp_dir()
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.DRAM_LEVELDB,
storage_path=db_directory)))
emb = embedding_ops.embedding_lookup(emb_var, math_ops.cast([1, 1, 1, 2, 2, 3], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad_decay_v2.AdagradDecayOptimizerV2(0.1, gs)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
saver = saver_module.Saver()
model_path = os.path.join(checkpoint_directory, "model.ckpt")
with self.test_session() as sess:
sess.run([init])
r, _, _ = sess.run([emb, train_op,loss])
r, _, _ = sess.run([emb, train_op,loss])
saver.save(sess, model_path)
r, _ = sess.run([emb, loss])
for name, shape in checkpoint_utils.list_variables(model_path):
if name == "var_1-freqs":
ckpt_value = checkpoint_utils.load_variable(model_path, name)
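          # Ids [1, 1, 1, 2, 2, 3] were looked up in two training steps before the save,
          # so the recorded frequencies are expected to be 6, 4 and 2.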
self.assertEqual(ckpt_value.tolist()[0], 6)
self.assertEqual(ckpt_value.tolist()[1], 4)
self.assertEqual(ckpt_value.tolist()[2], 2)
def testEmbeddingVariableForL2FeatureEvictionLevelDB(self):
print("testEmbeddingVariableForL2FeatureEvictionLevelDB")
checkpoint_directory = self.get_temp_dir()
db_directory = self.get_temp_dir()
evict = variables.L2WeightEvict(l2_weight_threshold=0.9)
storage_option = variables.StorageOption(storage_type=config_pb2.StorageType.LEVELDB,
storage_path=db_directory)
var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 3,
initializer=init_ops.ones_initializer(dtypes.float32),
ev_option = variables.EmbeddingVariableOption(evict_option=evict, storage_option=storage_option))
emb = embedding_ops.embedding_lookup(var, math_ops.cast([0,0,0,1,1,2], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
opt = ftrl.FtrlOptimizer(0.1, l1_regularization_strength=2.0, l2_regularization_strength=0.00001)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
saver = saver_module.Saver()
init = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
emb_ori = sess.run([emb, train_op])
save_path = saver.save(sess, os.path.join(checkpoint_directory, "model1.ckpt"), global_step=12345)
#for name, shape in checkpoint_utils.list_variables(checkpoint_directory):
# print('loading... ', name, shape)
with self.test_session() as sess:
saver.restore(sess, os.path.join(checkpoint_directory, "model1.ckpt-12345"))
emb_right = [[0.8282884, 0.8282884, 0.8282884],
[0.8282884, 0.8282884, 0.8282884],
[0.8282884, 0.8282884, 0.8282884],
[0.7927219, 0.7927219, 0.7927219],
[0.7927219, 0.7927219, 0.7927219],
[1.0, 1.0, 1.0]]
emb_ori = sess.run(emb)
for i in range(6):
for j in range(3):
self.assertAlmostEqual(emb_ori[i][j], emb_right[i][j])
def testEmbeddingVariableForDRAMAndLEVELDB(self):
print("testEmbeddingVariableForDRAMAndLEVELDB")
def runTestAdagrad(self, var, g):
#ids = array_ops.placeholder(dtypes.int64, name="ids")
#emb = embedding_ops.embedding_lookup(var, ids)
emb = embedding_ops.embedding_lookup(var, math_ops.cast([1, 1, 1, 2, 2, 3], dtypes.int64))
fun = math_ops.multiply(emb, 2.0, name='multiply')
loss = math_ops.reduce_sum(fun, name='reduce_sum')
gs = training_util.get_or_create_global_step()
opt = adagrad.AdagradOptimizer(0.1)
g_v = opt.compute_gradients(loss)
train_op = opt.apply_gradients(g_v)
init = variables.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_VAR_OPS))
sess.run(ops.get_collection(ops.GraphKeys.EV_INIT_SLOT_OPS))
sess.run([init])
for i in xrange(60):
r, _, _ = sess.run([emb, train_op, loss])
return r
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
db_directory = self.get_temp_dir()
emb_var = variable_scope.get_embedding_variable("var_1",
embedding_dim = 30,
initializer=init_ops.ones_initializer(dtypes.float32),
partitioner=partitioned_variables.fixed_size_partitioner(num_shards=1),
steps_to_live=5,
ev_option = variables.EmbeddingVariableOption(storage_option=variables.StorageOption(storage_type=config_pb2.StorageType.DRAM_LEVELDB,
storage_path=db_directory)))
emb1 = runTestAdagrad(self, emb_var, g)
with ops.device('/cpu:0'), ops.Graph().as_default() as g:
var = variable_scope.get_variable("var_2", shape=[100, 30], initializer=init_ops.ones_initializer(dtypes.float32))
emb2 = runTestAdagrad(self, var, g)
for i in range(0, 6):
for j in range(0, 30):
self.assertAlmostEqual(emb1.tolist()[i][j], emb2.tolist()[i][j])
if __name__ == "__main__":
googletest.main()
| 52.054167
| 198
| 0.66064
|
9537ba2e646d135fe4619f25ba07229d2e95a032
| 9,201
|
py
|
Python
|
examples/morphing/flight_conditions/classifying_optimal.py
|
leal26/pyXFOIL
|
88ff224be25cdb51eb821315f6e094f68fb13247
|
[
"MIT"
] | 50
|
2016-03-15T17:24:55.000Z
|
2021-12-28T07:32:45.000Z
|
examples/morphing/classifying_optimal.py
|
SzymonSzyszko/AeroPy
|
b061c690e5926fdd834b7c50837c25108e908156
|
[
"MIT"
] | 22
|
2017-04-20T11:27:28.000Z
|
2022-02-09T05:57:06.000Z
|
examples/morphing/classifying_optimal.py
|
SzymonSzyszko/AeroPy
|
b061c690e5926fdd834b7c50837c25108e908156
|
[
"MIT"
] | 34
|
2016-03-04T15:57:37.000Z
|
2022-02-15T20:06:54.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from aeropy.geometry.airfoil import CST
from aeropy.morphing.camber_2D import *
import pandas
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
# testing = 'structurally_consistent'
inverted = False
morphing_direction = 'forwards'
# ==============================================================================
# Inputs
# ==============================================================================
# Parameter
c_P = 1. # m
deltaz = 0.*c_P # m
# Avian wing, order 5
# Au_P = [0.23993240191629417, 0.34468227138908186, 0.18125405377549103,
# 0.35371349126072665, 0.2440815012119143, 0.25724974995738387]
# Al_P = [0.18889012559339036, -0.24686758992053115, 0.077569769493868401,
# -0.547827192265256, -0.0047342206759065641, -0.23994805474814629]
# NACA0012
Au_P = [0.1828, 0.1179, 0.2079, 0.0850, 0.1874]
Al_P = Au_P
# Passive shape coefficients for parent
# Au_P = [.5,.4,.3]
# Active shape coefficients for parent
# Al_P = [.5,.1,.1]
n = len(Au_P) - 1
if inverted:
temp = Au_P
Au_P = list(-np.array(Al_P))
Al_P = list(-np.array(temp))
# Passive shape coefficients for child
data = pandas.read_csv('optimal_map.csv')
# Spar position for cruise (adiminesional because the chord will still be calculated)
psi_spars = [0.1, 0.3, 0.6, 0.8]
# ==============================================================================
# Calculate dependent coefficients
# ==============================================================================
import pickle
f = open('design_optimal.p', 'rb')
designs = pickle.load(f)
f.close()
f = open('points_optimal.p', 'rb')
points = pickle.load(f)
f.close()
# points = []
# designs = []
# for i in range(len(data.values[:, 0])):
# print(i)
# AC_u = list(data.values[i, 0:4])
# Au_C, Al_C, c_C, spar_thicknesses = calculate_dependent_shape_coefficients(
# AC_u,
# psi_spars, Au_P, Al_P,
# deltaz, c_P, morphing=morphing_direction)
# x = np.linspace(0, c_C, 100)
# y = CST(x, c_C, deltasz=[deltaz/2., deltaz/2.], Al=Al_C, Au=Au_C)
# points.append(list(x) + list(y['l']) + list(y['u']))
# designs.append({'x': x, 'yl': y['l'], 'yu': y['u']})
# points = np.array(points)
# f = open('design_optimal.p', 'wb')
# pickle.dump(designs, f)
# f.close()
# f = open('points_optimal.p', 'wb')
# pickle.dump(points, f)
# f.close()
n_clusters = 4
# create kmeans object
kmeans = KMeans(n_clusters=n_clusters)
# fit kmeans object to data
kmeans.fit(points)
# save new clusters for chart
y_km = kmeans.fit_predict(points)
# Find designs closest to cluster
data = data.values
closest, _ = pairwise_distances_argmin_min(kmeans.cluster_centers_, points)
closest = closest[np.argsort(data[closest, -3])]
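# pairwise_distances_argmin_min returns, for each cluster center, the index of the nearest
# design; those representatives are then re-ordered by angle of attack (column -3) for plotting.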
# colors = ['0.3', '0.5', '0.7']
colors = ['b', 'g', 'r', 'm', 'c']
plt.figure()
for ii in range(n_clusters):
i = y_km[closest[ii]]
plt.scatter(data[y_km == i, -3], data[y_km == i, -2], s=100, c=colors[ii], label=ii)
plt.scatter(data[closest, -3], data[closest, -2], s=100, c='k',
marker='s', label='Centers')
plt.xlabel(r'Angle of Attack (${}^\circ$)')
plt.ylabel('Velocity (m/s)')
plt.legend()
plt.show()
# ==============================================================================
# Plot results
# ==============================================================================
plt.figure()
np.set_printoptions(precision=20)
x_p = np.linspace(0, c_P, 100000)
y_p = CST(x_p, c_P, deltasz=[deltaz/2., deltaz/2.], Al=Al_P, Au=Au_P)
for ii in range(len(closest)):
i = closest[ii]
d = designs[i]
plt.plot(d['x'], d['yu'], colors[ii], label='%i' % ii, lw=2)
plt.plot(d['x'], d['yl'], colors[ii], label=None, lw=2)
plt.plot(x_p, y_p['u'], 'k--', label='Parent', lw=2)
plt.plot(x_p, y_p['l'], 'k--', label=None, lw=2)
plt.xlabel(r'$\psi^p$', fontsize=14)
plt.ylabel(r'$\zeta^p$', fontsize=14)
plt.ylim([-0.06, 0.17])
plt.grid()
plt.gca().set_aspect('equal', adjustable='box')
plt.legend(loc=1)
plt.show()
for ii in range(len(closest)):
i = closest[ii]
d = designs[i]
plt.figure()
AC_u = list(data[i, 0:4])
print(i, data[i])
Au_C, Al_C, c_C, spar_thicknesses = calculate_dependent_shape_coefficients(
AC_u,
psi_spars, Au_P, Al_P,
deltaz, c_P, morphing=morphing_direction)
x_c = np.linspace(0, c_C, 1000)
y_c = CST(x_c, c_C, deltasz=[deltaz/2., deltaz/2.], Al=Al_C, Au=Au_C)
plt.plot(d['x'], d['yu'], colors[ii], label='Children', lw=2)
plt.plot(d['x'], d['yl'], colors[ii], label=None, lw=2)
plt.plot(x_p, y_p['u'], 'k--', label='Parent', lw=2)
plt.plot(x_p, y_p['l'], 'k--', label=None, lw=2)
if morphing_direction == 'forwards':
psi_flats = []
intersections_x_children = [0]
intersections_y_children = [0]
intersections_x_parent = [0]
intersections_y_parent = [0]
for j in range(len(psi_spars)):
psi_parent_j = psi_spars[j]
# Calculate psi at landing
# psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
psi_children_j = calculate_psi_goal(psi_parent_j, Au_P, Au_C, deltaz, c_P, c_C)
x_children_j = psi_children_j*c_C
# Calculate xi at landing
temp = CST(x_children_j, c_C, [deltaz/2., deltaz/2.], Al=Al_C, Au=Au_C)
y_children_j = temp['u']
s = calculate_spar_direction(psi_spars[j], Au_P, Au_C, deltaz, c_C)
# Print spars for children
if not inverted:
plt.plot([x_children_j, x_children_j - spar_thicknesses[j]*s[0]], [y_children_j,
y_children_j - spar_thicknesses[j]*s[1]], c=colors[ii], lw=2, label=None)
else:
plt.plot([x_children_j, x_children_j - spar_thicknesses[j]*s[0]],
[-y_children_j, -y_children_j + spar_thicknesses[j]*s[1]], c=colors[ii], lw=2, label=None)
psi_flats.append(x_children_j - spar_thicknesses[j]*s[0])
y = CST(np.array([psi_parent_j*c_P]), c_P,
deltasz=[deltaz/2., deltaz/2.], Al=Al_P, Au=Au_P)
intersections_x_children.append(x_children_j - spar_thicknesses[j]*s[0])
intersections_y_children.append(y_children_j - spar_thicknesses[j]*s[1])
# Print spars for parents
if not inverted:
plt.plot([psi_parent_j*c_P, psi_parent_j*c_P],
[y['u'], y['u']-spar_thicknesses[j]], 'k--', lw=2, label=None)
else:
plt.plot([psi_parent_j*c_P, psi_parent_j*c_P], [-y['u'], -
y['u']+spar_thicknesses[j]], 'k--', lw=2, label=None)
intersections_x_parent.append(psi_parent_j*c_P)
intersections_y_parent.append(y['u']-spar_thicknesses[j])
elif morphing_direction == 'backwards':
# For backwards, goal is the parent and deformed is children
for i in range(len(psi_spars)):
psi_i = psi_spars[i]
# Calculate psi at landing
psi_goal_i = calculate_psi_goal(psi_i, Au_C, Au_P, deltaz, c_C, c_P)
x_goal_i = psi_goal_i*c_P
# Calculate xi at landing
temp = CST(x_goal_i, c_P, [deltaz/2., deltaz/2.], Al=Al_P, Au=Au_P)
y_goal_i = temp['u']
# calculate spar direction
s = calculate_spar_direction(psi_i, Au_C, Au_P, deltaz, c_P)
plt.plot([x_goal_i, x_goal_i - spar_thicknesses[i]*s[0]],
[y_goal_i, y_goal_i - spar_thicknesses[i]*s[1]], 'k--')
y = CST(np.array([psi_i*c_C]), c_C, deltasz=[deltaz/2., deltaz/2.], Al=Al_C, Au=Au_C)
plt.plot([psi_i*c_C, psi_i*c_C], [y['u'], y['u'] -
spar_thicknesses[i]], colors[ii], lw=2, label=None)
    plt.xlabel(r'$\psi^p$', fontsize=14)
plt.ylabel(r'$\zeta^p$', fontsize=14)
plt.ylim([-0.06, 0.17])
plt.grid()
plt.axis('off')
plt.gca().set_aspect('equal', adjustable='box')
# plt.legend(loc=1)
plt.show()
# if morphing_direction == 'forwards':
# print('chords', c_P, c_C)
# # Calculate initial lengths
# strains, av_strains = calculate_strains(Au_P, Al_P, c_P, Au_C, Al_C, c_C, deltaz, psi_spars)
#
# intersections_x_children.append(c_C)
# intersections_y_children.append(0)
# intersections_x_parent.append(c_P)
# intersections_y_parent.append(0)
# # Wire lengths
# for i in range(len(intersections_x_children)-1):
# length_parent = math.sqrt((intersections_x_parent[i]-intersections_x_parent[i+1])**2 +
# (intersections_y_parent[i]-intersections_y_parent[i+1])**2)
# length_children = math.sqrt((intersections_x_children[i]-intersections_x_children[i+1])**2 +
# (intersections_y_children[i]-intersections_y_children[i+1])**2)
# print((length_children-length_parent)/length_parent)
| 38.497908
| 156
| 0.572981
|
0477b7202250a22c6cee7e88ffa6325f793d45db
| 1,668
|
py
|
Python
|
basic_func.py
|
WilliamGong/traffic-analyze
|
687481f1f53d334a534f8cbdcc66bc2420d18f6c
|
[
"MIT"
] | null | null | null |
basic_func.py
|
WilliamGong/traffic-analyze
|
687481f1f53d334a534f8cbdcc66bc2420d18f6c
|
[
"MIT"
] | null | null | null |
basic_func.py
|
WilliamGong/traffic-analyze
|
687481f1f53d334a534f8cbdcc66bc2420d18f6c
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from functools import singledispatch
# use a Chinese-capable font so CJK labels render correctly
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['font.serif'] = ['KaiTi']
# plt.rcParams['axes.unicode_minus'] = False  # fix the minus sign '-' rendering as a box in saved figures, or convert the minus sign to a string
# init functions
def initNum(dic, ser):
""" This function is used to init the dictionary with index of
"非常愿意", "愿意", "一般", "不愿意", "非常不愿意".
Args:
dic (python dictionary): Num dictionary to storage data
ser (pandas.Series): series from source df.
"""
for i in ser:
if i == 1:
dic['非常愿意'] += 1
elif i == 2:
dic['愿意'] += 1
elif i == 3:
dic['一般'] += 1
elif i == 4:
dic['不愿意'] += 1
elif i == 5:
dic['非常不愿意'] += 1
def indexAvg(ser):
""" This function is used to calculate average point.
It used to the agree/disagree multiple choice.
Args:
ser (pandas.Series): Source data
Returns:
        float: average point, rounded to 2 decimal places.
"""
length = ser.size
indexSum = 0
for i in ser:
indexSum += i
return round(indexSum / length, 2)
# draw functions
@singledispatch
def drawNumPie(dic, title):
sDic = pd.Series(dic, name=title)
sDic.plot.pie(autopct='%.2f')
plt.show()
@drawNumPie.register
def _(data: list, index, title):
sDic = pd.Series(data, index, name=title)
sDic.plot.pie(autopct='%.2f')
plt.show()
def drawNumBar(data, index, title):
sBar = pd.Series(data, index, name=title)
sBar.plot.bar()
plt.show()
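# Minimal usage sketch (added for illustration; the answers below are made-up survey data,
# not values from the original questionnaire):
if __name__ == '__main__':
    demo = pd.Series([1, 2, 2, 3, 5])                # five answers on the 1-5 scale
    counts = {'非常愿意': 0, '愿意': 0, '一般': 0, '不愿意': 0, '非常不愿意': 0}
    initNum(counts, demo)                            # tally answers per category
    print(counts, indexAvg(demo))                    # average point should be 2.6
    drawNumPie(counts, 'demo question')              # pie chart of the tally (opens a window)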
| 23.492958
| 78
| 0.578537
|
5ea521228ef532e6838d2ee0eb76c0fbbfa93d60
| 451
|
py
|
Python
|
cursoemvideo/desafios/Desafio017.py
|
adinsankofa/python
|
8f2f26c77015c0baaa76174e004406b4115272c7
|
[
"MIT"
] | null | null | null |
cursoemvideo/desafios/Desafio017.py
|
adinsankofa/python
|
8f2f26c77015c0baaa76174e004406b4115272c7
|
[
"MIT"
] | null | null | null |
cursoemvideo/desafios/Desafio017.py
|
adinsankofa/python
|
8f2f26c77015c0baaa76174e004406b4115272c7
|
[
"MIT"
] | null | null | null |
'''
Write a program that reads the lengths of the opposite and adjacent legs of a right triangle,
then computes and shows the length of the hypotenuse.
'''
from math import hypot
oposto = float(input("Qual é o comprimento do cateto oposto: "))
adjascente = float(input("Qual é o comprimento do cateto oposto: "))
hi = hypot(oposto, adjascente)                    # using math.hypot
hi1 = (oposto ** 2 + adjascente ** 2) ** (1/2)    # same value via the Pythagorean formula
print('The hypotenuse is {:.2f}'.format(hi1))
| 30.066667
| 106
| 0.718404
|
8d0f29b3b7698a98256733d062d4871699add3bb
| 3,058
|
py
|
Python
|
model/rnn_with_syntax_model.py
|
gentaiscool/multi-task-cs-lm
|
7072752a8ebb73b42468b5ea373c4b9c77d03234
|
[
"MIT"
] | 10
|
2018-08-14T06:44:01.000Z
|
2021-02-02T01:36:49.000Z
|
model/rnn_with_syntax_model.py
|
gentaiscool/multi-task-cs-lm
|
7072752a8ebb73b42468b5ea373c4b9c77d03234
|
[
"MIT"
] | null | null | null |
model/rnn_with_syntax_model.py
|
gentaiscool/multi-task-cs-lm
|
7072752a8ebb73b42468b5ea373c4b9c77d03234
|
[
"MIT"
] | 2
|
2019-02-20T02:14:38.000Z
|
2021-09-11T14:11:12.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from tqdm import tqdm
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, npostag, ninp, nhid, nlayers, npostagemb, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.postag_encoder = nn.Embedding(npostag, npostagemb)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp + npostagemb, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve postaguage Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for postaguage Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.init_weights()
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.postag_encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, input_postag, hidden):
emb = self.drop(self.encoder(input))
postag_emb = self.drop(self.postag_encoder(input_postag))
emb_cat = torch.cat((emb, postag_emb), dim=2)
output, hidden = self.rnn(emb_cat, hidden)
output = self.drop(output)
        decoded = F.log_softmax(self.decoder(output.view(output.size(0)*output.size(1), output.size(2))), dim=1)
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
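# Minimal smoke-test sketch (added for illustration; every hyperparameter below is an
# assumed toy value, not taken from the original training script):
if __name__ == '__main__':
    model = RNNModel('LSTM', ntoken=100, npostag=20, ninp=16, nhid=32,
                     nlayers=1, npostagemb=8, dropout=0.0)
    seq_len, bsz = 5, 2
    tokens = torch.randint(0, 100, (seq_len, bsz))   # word ids
    postags = torch.randint(0, 20, (seq_len, bsz))   # POS-tag ids
    hidden = model.init_hidden(bsz)
    log_probs, hidden = model(tokens, postags, hidden)
    print(log_probs.shape)                           # expected: (5, 2, 100)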
| 41.324324
| 115
| 0.626226
|
f5cade0728bc1e548e7740bd766cf36392d5c666
| 1,708
|
py
|
Python
|
server/server.py
|
Dachshund77/FlaskEwatson
|
5ca83d201ae3695974480f7bb84ce9261a973ee5
|
[
"MIT"
] | null | null | null |
server/server.py
|
Dachshund77/FlaskEwatson
|
5ca83d201ae3695974480f7bb84ce9261a973ee5
|
[
"MIT"
] | null | null | null |
server/server.py
|
Dachshund77/FlaskEwatson
|
5ca83d201ae3695974480f7bb84ce9261a973ee5
|
[
"MIT"
] | 2
|
2020-09-24T22:17:27.000Z
|
2020-10-09T22:46:28.000Z
|
# Main entry point for the server application
from flask import Flask
from flask import url_for
from flask import request as req
from datetime import datetime
from flask_json import FlaskJSON
from flask_json import json_response as res
from flask_cors import CORS
import logging
import time
import traceback
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
FlaskJSON(app)
CORS(app)
#Other routes, each endpoint should get its own file
from server.api import temperatures
from server.api import CO2
from server.api import humidities
from server.api import pressures
from server.api import sensors
# Error Handlers for app
@app.errorhandler(403)
def handler_403(e):
return res(403, error=str(e), time=datetime.utcnow())
@app.errorhandler(404)
def handler_404(e):
return res(404, error=str(e), time=datetime.utcnow())
@app.errorhandler(405)
def handler_405(e):
return res(405, error=str(e), time=datetime.utcnow())
@app.errorhandler(500)
def handler_500(e):
traceback.print_exc()
return res(500, error=str(e), time=datetime.utcnow())
@app.errorhandler(501)
def handler_501(e):
return res(501, error=str(e), time=datetime.utcnow())
@app.errorhandler(Exception)
def handler_default(e):
traceback.print_exc()
return res(500, error=str(e), time=datetime.utcnow())
#Base base and misc routes
#Identifier that this is the box
@app.route('/identity', methods=['GET'])
def base_identity():
logging.debug("Received request /identity")
return res(200, identifier="HelloSensorBox785179218796217896319", timeUTC=datetime.utcnow())
#Start up
def run():
#config for running dev, haven't looked at prod server yet
app.run(debug=False, host='0.0.0.0', port=5000)
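# Assumed direct-launch hook (illustrative): the original project may start the app
# differently, e.g. through a WSGI server importing server.server:app.
if __name__ == '__main__':
    run()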
| 27.111111
| 93
| 0.771663
|
1663b73ac2d3976c2675ecdcbc6be395c5324fc8
| 2,623
|
py
|
Python
|
py12306/helpers/OCR.py
|
mehrdad-shokri/py12306
|
e4f20350fe5f71d7a9f3f84ed2e15cdc6493bee7
|
[
"Apache-2.0"
] | 2
|
2019-01-15T07:34:30.000Z
|
2019-01-15T09:05:40.000Z
|
py12306/helpers/OCR.py
|
talisk/py12306
|
9346546dd9127b0eed43a313aaf0c86691375055
|
[
"Apache-2.0"
] | null | null | null |
py12306/helpers/OCR.py
|
talisk/py12306
|
9346546dd9127b0eed43a313aaf0c86691375055
|
[
"Apache-2.0"
] | 1
|
2021-07-29T06:30:25.000Z
|
2021-07-29T06:30:25.000Z
|
import math
import random
from py12306.config import Config
from py12306.helpers.api import *
from py12306.helpers.request import Request
from py12306.log.common_log import CommonLog
from py12306.vender.ruokuai.main import RKClient
class OCR:
"""
    Image recognition
"""
session = None
def __init__(self):
self.session = Request()
@classmethod
def get_img_position(cls, img):
"""
        Get the click coordinates for a captcha image
        :param img: base64-encoded captcha image
        :return: list of coordinates, or None on failure
"""
self = cls()
if Config().AUTO_CODE_PLATFORM == 'free':
return self.get_image_by_free_site(img)
return self.get_img_position_by_ruokuai(img)
def get_img_position_by_ruokuai(self, img):
ruokuai_account = Config().AUTO_CODE_ACCOUNT
soft_id = '119671'
soft_key = '6839cbaca1f942f58d2760baba5ed987'
rc = RKClient(ruokuai_account.get('user'), ruokuai_account.get('pwd'), soft_id, soft_key)
result = rc.rk_create(img, 6113)
if "Result" in result:
return self.get_image_position_by_offset(list(result['Result']))
CommonLog.print_auto_code_fail(result.get("Error", '-'))
return None
def get_image_position_by_offset(self, offsets):
positions = []
width = 75
height = 75
for offset in offsets:
random_x = random.randint(-5, 5)
random_y = random.randint(-5, 5)
offset = int(offset)
x = width * ((offset - 1) % 4 + 1) - width / 2 + random_x
y = height * math.ceil(offset / 4) - height / 2 + random_y
positions.append(int(x))
positions.append(int(y))
return positions
def get_image_by_free_site(self, img):
data = {
'base64': img
}
response = self.session.post(API_FREE_CODE_QCR_API, json=data)
result = response.json()
if result.get('success') and result.get('check'):
check_data = {
'check': result.get('check'),
'img_buf': img,
'logon': 1,
'type': 'D'
}
check_response = self.session.post(API_FREE_CODE_QCR_API_CHECK, json=check_data)
check_result = check_response.json()
if check_result.get('res'):
position = check_result.get('res')
return position.replace('(', '').replace(')', '').split(',')
CommonLog.print_auto_code_fail(CommonLog.MESSAGE_GET_RESPONSE_FROM_FREE_AUTO_CODE)
return None
if __name__ == '__main__':
pass
# code_result = AuthCode.get_auth_code()
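    # Worked example of the grid mapping used in get_image_position_by_offset (assumes the
    # captcha is a 4-column grid of 75x75 px cells; the random jitter is left out):
    offset = 6                                   # cell 6 -> column 2, row 2
    x = 75 * ((offset - 1) % 4 + 1) - 75 / 2     # 112.5 px from the left edge
    y = 75 * math.ceil(offset / 4) - 75 / 2      # 112.5 px from the top edge
    print(int(x), int(y))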
| 31.60241
| 97
| 0.592451
|
6d06d6f2096cab88c3f1a19c13366df33d9e1dd1
| 109
|
py
|
Python
|
new1.py
|
kpmishraindia/wowmeter
|
6e7c681d5fbc0a69e973c241c9fc134f74f64b2d
|
[
"Apache-2.0"
] | null | null | null |
new1.py
|
kpmishraindia/wowmeter
|
6e7c681d5fbc0a69e973c241c9fc134f74f64b2d
|
[
"Apache-2.0"
] | null | null | null |
new1.py
|
kpmishraindia/wowmeter
|
6e7c681d5fbc0a69e973c241c9fc134f74f64b2d
|
[
"Apache-2.0"
] | null | null | null |
#new file as required
print ('ne1')
print ('ne2')
#C:\Users\kpmis\OneDrive\Documents\GitHub\wowmeter\new1.py
| 21.8
| 58
| 0.743119
|
8e38f20ec05de79332f8b76fed7213fc98c1f348
| 8,176
|
py
|
Python
|
pose_3d_model/mvn/utils/multiview.py
|
sony-si/pytorch-CycleGAN-and-pix2pix
|
4ebc64daa369037275c09013aec442a4412f736a
|
[
"MIT"
] | 2
|
2021-05-23T01:36:51.000Z
|
2021-06-21T13:20:53.000Z
|
pose_3d_model/mvn/utils/multiview.py
|
sony-si/pytorch-CycleGAN-and-pix2pix
|
4ebc64daa369037275c09013aec442a4412f736a
|
[
"MIT"
] | null | null | null |
pose_3d_model/mvn/utils/multiview.py
|
sony-si/pytorch-CycleGAN-and-pix2pix
|
4ebc64daa369037275c09013aec442a4412f736a
|
[
"MIT"
] | 2
|
2021-04-14T06:11:52.000Z
|
2021-06-21T13:21:03.000Z
|
"""
This file was copied from github.com/karfly/learnable-triangulation-pytorch and modified for this project needs.
The license of the file is in: github.com/karfly/learnable-triangulation-pytorch/blob/master/LICENSE
"""
import numpy as np
import torch
class Camera:
def __init__(self, R, t, K, dist=None, name=""):
self.R = np.array(R).copy()
assert self.R.shape == (3, 3)
self.t = np.array(t).copy()
assert self.t.size == 3
self.t = self.t.reshape(3, 1)
self.K = np.array(K).copy()
assert self.K.shape == (3, 3)
self.dist = dist
if self.dist is not None:
self.dist = np.array(self.dist).copy().flatten()
self.name = name
def update_after_crop(self, bbox):
left, upper, right, lower = bbox
cx, cy = self.K[0, 2], self.K[1, 2]
new_cx = cx - left
new_cy = cy - upper
self.K[0, 2], self.K[1, 2] = new_cx, new_cy
def update_after_resize(self, image_shape, new_image_shape):
height, width = image_shape
new_height, new_width = new_image_shape
fx, fy, cx, cy = self.K[0, 0], self.K[1, 1], self.K[0, 2], self.K[1, 2]
new_fx = fx * (new_width / width)
new_fy = fy * (new_height / height)
new_cx = cx * (new_width / width)
new_cy = cy * (new_height / height)
self.K[0, 0], self.K[1, 1], self.K[0, 2], self.K[1, 2] = new_fx, new_fy, new_cx, new_cy
def update_after_flip(self, image_shape, new_image_shape):
height, width = image_shape
new_height, new_width = new_image_shape
fx, fy, cx, cy = self.K[0, 0], self.K[1, 1], self.K[0, 2], self.K[1, 2]
new_fx = fx * (new_width / width)
new_fy = fy * (new_height / height)
new_cx = cx * (new_width / width)
new_cy = cy * (new_height / height)
self.K[0, 0], self.K[1, 1], self.K[0, 2], self.K[1, 2] = new_fx, new_fy, new_cx, new_cy
@property
def projection(self):
return self.K.dot(self.extrinsics)
@property
def extrinsics(self):
return np.hstack([self.R, self.t])
def euclidean_to_homogeneous(points):
"""Converts euclidean points to homogeneous
Args:
points numpy array or torch tensor of shape (N, M): N euclidean points of dimension M
Returns:
numpy array or torch tensor of shape (N, M + 1): homogeneous points
"""
if isinstance(points, np.ndarray):
return np.hstack([points, np.ones((len(points), 1))])
elif torch.is_tensor(points):
return torch.cat([points, torch.ones((points.shape[0], 1), dtype=points.dtype, device=points.device)], dim=1)
else:
raise TypeError("Works only with numpy arrays and PyTorch tensors.")
def homogeneous_to_euclidean(points):
"""Converts homogeneous points to euclidean
Args:
points numpy array or torch tensor of shape (N, M + 1): N homogeneous points of dimension M
Returns:
numpy array or torch tensor of shape (N, M): euclidean points
"""
if isinstance(points, np.ndarray):
return (points.T[:-1] / points.T[-1]).T
elif torch.is_tensor(points):
return (points.transpose(1, 0)[:-1] / points.transpose(1, 0)[-1]).transpose(1, 0)
else:
raise TypeError("Works only with numpy arrays and PyTorch tensors.")
def project_3d_points_to_image_plane_without_distortion(proj_matrix, points_3d, convert_back_to_euclidean=True):
"""Project 3D points to image plane not taking into account distortion
Args:
proj_matrix numpy array or torch tensor of shape (3, 4): projection matrix
points_3d numpy array or torch tensor of shape (N, 3): 3D points
convert_back_to_euclidean bool: if True, then resulting points will be converted to euclidean coordinates
NOTE: division by zero can be here if z = 0
Returns:
numpy array or torch tensor of shape (N, 2): 3D points projected to image plane
"""
if isinstance(proj_matrix, np.ndarray) and isinstance(points_3d, np.ndarray):
result = euclidean_to_homogeneous(points_3d) @ proj_matrix.T
if convert_back_to_euclidean:
result = homogeneous_to_euclidean(result)
return result
elif torch.is_tensor(proj_matrix) and torch.is_tensor(points_3d):
result = euclidean_to_homogeneous(points_3d) @ proj_matrix.t()
if convert_back_to_euclidean:
result = homogeneous_to_euclidean(result)
return result
else:
raise TypeError("Works only with numpy arrays and PyTorch tensors.")
def triangulate_point_from_multiple_views_linear(proj_matricies, points):
"""Triangulates one point from multiple (N) views using direct linear transformation (DLT).
For more information look at "Multiple view geometry in computer vision",
Richard Hartley and Andrew Zisserman, 12.2 (p. 312).
Args:
proj_matricies numpy array of shape (N, 3, 4): sequence of projection matricies (3x4)
points numpy array of shape (N, 2): sequence of points' coordinates
Returns:
point_3d numpy array of shape (3,): triangulated point
"""
assert len(proj_matricies) == len(points)
n_views = len(proj_matricies)
A = np.zeros((2 * n_views, 4))
for j in range(len(proj_matricies)):
A[j * 2 + 0] = points[j][0] * proj_matricies[j][2, :] - proj_matricies[j][0, :]
A[j * 2 + 1] = points[j][1] * proj_matricies[j][2, :] - proj_matricies[j][1, :]
u, s, vh = np.linalg.svd(A, full_matrices=False)
point_3d_homo = vh[3, :]
point_3d = homogeneous_to_euclidean(point_3d_homo)
return point_3d
def triangulate_point_from_multiple_views_linear_torch(proj_matricies, points, confidences=None):
"""Similar as triangulate_point_from_multiple_views_linear() but for PyTorch.
For more information see its documentation.
Args:
proj_matricies torch tensor of shape (N, 3, 4): sequence of projection matricies (3x4)
        points torch tensor of shape (N, 2): sequence of points' coordinates
confidences None or torch tensor of shape (N,): confidences of points [0.0, 1.0].
If None, all confidences are supposed to be 1.0
Returns:
point_3d numpy torch tensor of shape (3,): triangulated point
"""
assert len(proj_matricies) == len(points)
n_views = len(proj_matricies)
# print(n_views)
if confidences is None:
confidences = torch.ones(n_views, dtype=torch.float32, device=points.device)
A = proj_matricies[:, 2:3].expand(n_views, 2, 4) * points.view(n_views, 2, 1)
A -= proj_matricies[:, :2]
A *= confidences.view(-1, 1, 1)
u, s, vh = torch.svd(A.view(-1, 4))
point_3d_homo = -vh[:, 3]
point_3d = homogeneous_to_euclidean(point_3d_homo.unsqueeze(0))[0]
return point_3d
def triangulate_batch_of_points(proj_matricies_batch, points_batch, confidences_batch=None):
batch_size, n_views, n_joints = points_batch.shape[:3]
point_3d_batch = torch.zeros(batch_size, n_joints, 3, dtype=torch.float32, device=points_batch.device)
for batch_i in range(batch_size):
for joint_i in range(n_joints):
points = points_batch[batch_i, :, joint_i, :]
confidences = confidences_batch[batch_i, :, joint_i] if confidences_batch is not None else None
point_3d = triangulate_point_from_multiple_views_linear_torch(proj_matricies_batch[batch_i], points, confidences=confidences)
point_3d_batch[batch_i, joint_i] = point_3d
return point_3d_batch
def calc_reprojection_error_matrix(keypoints_3d, keypoints_2d_list, proj_matricies):
reprojection_error_matrix = []
for keypoints_2d, proj_matrix in zip(keypoints_2d_list, proj_matricies):
keypoints_2d_projected = project_3d_points_to_image_plane_without_distortion(proj_matrix, keypoints_3d)
reprojection_error = 1 / 2 * np.sqrt(np.sum((keypoints_2d - keypoints_2d_projected) ** 2, axis=1))
reprojection_error_matrix.append(reprojection_error)
return np.vstack(reprojection_error_matrix).T
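# Minimal self-check sketch (added for illustration; the camera parameters below are
# made-up values, not from any dataset): project a known 3D point through two synthetic
# pinhole cameras, then recover it with the DLT triangulation above.
if __name__ == '__main__':
    K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
    cam1 = Camera(R=np.eye(3), t=np.zeros(3), K=K, name="cam1")
    R2 = np.array([[0., 0., 1.], [0., 1., 0.], [-1., 0., 0.]])   # 90 deg rotation about y
    cam2 = Camera(R=R2, t=np.array([0., 0., 4.]), K=K, name="cam2")
    point_3d = np.array([[0.5, -0.2, 4.0]])
    proj_matricies = np.stack([cam1.projection, cam2.projection])
    points_2d = np.array([
        project_3d_points_to_image_plane_without_distortion(P, point_3d)[0]
        for P in proj_matricies
    ])
    recovered = triangulate_point_from_multiple_views_linear(proj_matricies, points_2d)
    print(recovered)   # expected to be close to [0.5, -0.2, 4.0]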
| 38.566038 | 137 | 0.66206 |
61740af55412596458e6df29ffec0d7e4e30d90d | 2,904 | py | Python |
app/recipe/tests/test_ingredients_api.py | ricardocastilloisc/curse-advance-django-docker | 2b73b2512ec0bb0b60b8d78c030484f71ab51fef | ["MIT"] | null | null | null |
app/recipe/tests/test_ingredients_api.py | ricardocastilloisc/curse-advance-django-docker | 2b73b2512ec0bb0b60b8d78c030484f71ab51fef | ["MIT"] | null | null | null |
app/recipe/tests/test_ingredients_api.py | ricardocastilloisc/curse-advance-django-docker | 2b73b2512ec0bb0b60b8d78c030484f71ab51fef | ["MIT"] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
""""Test the public available ingredients API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
""""Test that login is required to access the endpoint"""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
    """Test the private ingredients API"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
            'test@londonappdev.com',
            'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
""""Test retrieving a list of ingredients"""
Ingredient.objects.create(user=self.user, name="Kale")
Ingredient.objects.create(user=self.user, name="Salt")
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
""""Test that ingredients for the authenticated user are returned"""
user2 = get_user_model().objects.create_user(
'other@londonappdev.com',
'test123'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], ingredient.name)
    def test_create_ingredient_successful(self):
        """Test creating a new ingredient"""
        payload = {'name': 'Cabbage'}
        self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
    def test_create_ingredient_invalid(self):
        """Test creating an invalid ingredient fails"""
        payload = {'name': ''}
        res = self.client.post(INGREDIENTS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| 33 | 78 | 0.649449 |
38c1f141037c8addde7e7893c2e34cfa06b2ec8c | 5,726 | py | Python |
autotest/utilities/test_gdallocationinfo.py | rcoup/gdal | 31240deb7b71d990a2abbad1bebedd0918989ca0 | ["MIT"] | null | null | null |
autotest/utilities/test_gdallocationinfo.py | rcoup/gdal | 31240deb7b71d990a2abbad1bebedd0918989ca0 | ["MIT"] | null | null | null |
autotest/utilities/test_gdallocationinfo.py | rcoup/gdal | 31240deb7b71d990a2abbad1bebedd0918989ca0 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: gdallocationinfo testing
# Author: Even Rouault <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append('../pymod')
sys.path.append('../gcore')
from osgeo import gdal
import gdaltest
import test_cli_utilities
###############################################################################
# Test basic usage
def test_gdallocationinfo_1():
if test_cli_utilities.get_gdallocationinfo_path() is None:
return 'skip'
(ret, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdallocationinfo_path() + ' ../gcore/data/byte.tif 0 0')
if not (err is None or err == ''):
gdaltest.post_reason('got error/warning')
print(err)
return 'fail'
ret = ret.replace('\r\n', '\n')
expected_ret = """Report:
Location: (0P,0L)
Band 1:
Value: 107"""
if ret.find(expected_ret) != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -xml
def test_gdallocationinfo_2():
if test_cli_utilities.get_gdallocationinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdallocationinfo_path() + ' -xml ../gcore/data/byte.tif 0 0')
ret = ret.replace('\r\n', '\n')
expected_ret = """<Report pixel="0" line="0">
<BandReport band="1">
<Value>107</Value>
</BandReport>
</Report>"""
if ret.find(expected_ret) != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -valonly
def test_gdallocationinfo_3():
if test_cli_utilities.get_gdallocationinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdallocationinfo_path() + ' -b 1 -valonly ../gcore/data/byte.tif 0 0')
expected_ret = """107"""
if ret.find(expected_ret) != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -geoloc
def test_gdallocationinfo_4():
if test_cli_utilities.get_gdallocationinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdallocationinfo_path() + ' -geoloc ../gcore/data/byte.tif 440720.000 3751320.000')
ret = ret.replace('\r\n', '\n')
expected_ret = """Report:
Location: (0P,0L)
Band 1:
Value: 107"""
if ret.find(expected_ret) != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -lifonly
def test_gdallocationinfo_5():
if test_cli_utilities.get_gdallocationinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdallocationinfo_path() + ' -lifonly ../gcore/data/byte.vrt 0 0')
expected_ret1 = """../gcore/data/byte.tif"""
expected_ret2 = """../gcore/data\\byte.tif"""
if ret.find(expected_ret1) < 0 and ret.find(expected_ret2) < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -overview
def test_gdallocationinfo_6():
if test_cli_utilities.get_gdallocationinfo_path() is None:
return 'skip'
src_ds = gdal.Open('../gcore/data/byte.tif')
ds = gdal.GetDriverByName('GTiff').CreateCopy('tmp/test_gdallocationinfo_6.tif', src_ds)
ds.BuildOverviews('AVERAGE', overviewlist=[2])
ds = None
src_ds = None
ret = gdaltest.runexternal(test_cli_utilities.get_gdallocationinfo_path() + ' tmp/test_gdallocationinfo_6.tif 10 10 -overview 1')
gdal.GetDriverByName('GTiff').Delete('tmp/test_gdallocationinfo_6.tif')
expected_ret = """Value: 130"""
if ret.find(expected_ret) < 0:
print(ret)
return 'fail'
return 'success'
gdaltest_list = [
test_gdallocationinfo_1,
test_gdallocationinfo_2,
test_gdallocationinfo_3,
test_gdallocationinfo_4,
test_gdallocationinfo_5,
test_gdallocationinfo_6,
]
if __name__ == '__main__':
gdaltest.setup_run('test_gdallocationinfo')
gdaltest.run_tests(gdaltest_list)
sys.exit(gdaltest.summarize())
| 31.461538 | 137 | 0.605833 |
cc19d0059597e136e02cc560d9c4b3ee156b27f3 | 986 | py | Python |
movieresources/movieresources/urls.py | AlbertWh1te/MovieResources | ce2a5c3fce34e40e41399cd1404bb7c80f5483f3 | ["MIT"] | null | null | null |
movieresources/movieresources/urls.py | AlbertWh1te/MovieResources | ce2a5c3fce34e40e41399cd1404bb7c80f5483f3 | ["MIT"] | 3 | 2020-02-12T00:07:28.000Z | 2021-06-10T19:46:23.000Z |
movieresources/movieresources/urls.py | MarkWh1te/MovieResources | ce2a5c3fce34e40e41399cd1404bb7c80f5483f3 | ["MIT"] | null | null | null |
"""movieresources URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from movies.views import index, init, MovieListView, SearchListView
urlpatterns = [
url(r'^init', init),
url(r'api/movieslist',MovieListView.as_view()),
url(r'api/searchlist',SearchListView.as_view()),
url(r'^guard', admin.site.urls),
url(r'', index),
]
| 36.518519 | 79 | 0.707911 |
f01d2e725b3246ab88b2cebfa762cc469ef0c6af | 14,187 | py | Python |
legacy/backends/orchestrator/gcp/orchestrator_gcp_backend.py | ParikhKadam/zenml | 867e4d4c982a50447bd182b30af37f2141dac5a4 | ["Apache-2.0"] | 1,275 | 2020-11-19T14:18:25.000Z | 2021-08-13T07:31:39.000Z |
legacy/backends/orchestrator/gcp/orchestrator_gcp_backend.py | ParikhKadam/zenml | 867e4d4c982a50447bd182b30af37f2141dac5a4 | ["Apache-2.0"] | 62 | 2020-11-30T16:06:14.000Z | 2021-08-10T08:34:52.000Z |
legacy/backends/orchestrator/gcp/orchestrator_gcp_backend.py | ParikhKadam/zenml | 867e4d4c982a50447bd182b30af37f2141dac5a4 | ["Apache-2.0"] | 75 | 2020-12-22T19:15:08.000Z | 2021-08-13T03:07:50.000Z |
# Copyright (c) ZenML GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Orchestrator for simple GCP VM backend"""
import base64
import json
import os
import time
from typing import Dict, Any
from typing import Text
import googleapiclient.discovery
from google.oauth2 import service_account as sa
from zenml.backends.orchestrator import OrchestratorBaseBackend
from zenml.repo import Repository
from zenml.standards import standard_keys as keys
from zenml.utils import path_utils
from zenml.utils import requirement_utils
from zenml.constants import ZENML_BASE_IMAGE_NAME, \
ZENML_TRAINER_IMAGE_NAME, GCP_ENTRYPOINT
from zenml.logger import get_logger
requirement_utils.check_integration(requirement_utils.GCP_INTEGRATION)
logger = get_logger(__name__)
EXTRACTED_TAR_DIR_NAME = 'zenml_working'
TAR_PATH_ARG = 'tar_path'
STAGING_AREA = 'staging'
class OrchestratorGCPBackend(OrchestratorBaseBackend):
"""
Orchestrates pipeline in a GCP Compute Instance.
This orchestrator creates a .tar.gz of the current ZenML repository, sends
it over to the artifact store, then launches a VM with the specified image.
    After the pipeline is done, the VM is automatically brought down, regardless
of whether the pipeline failed or not. To see logs of the pipeline, use
Logs Explorer <https://console.cloud.google.com/logs/> and filter for
`logName="projects/<project_name>/logs/gcplogs-docker-driver"`. After
running the VM, the logger returns the link directly to the logs.
"""
def __init__(self,
project,
cloudsql_connection_name,
machine_type: Text = 'e2-medium',
gpu: Text = None,
gpu_count: int = 0,
zone: Text = 'europe-west1-b',
instance_name: Text = None,
disk_size: int = 100,
image: Text = None,
source_disk_image: Text = None,
preemptible: bool = True,
service_account: Text = None,
**kwargs):
"""
Initialize a GCP VM to orchestrate a pipeline. Users have the option
to run it with or without a GPU. In cases where a GPU is used,
the `image` and `source_disk_image` are both adapted to be
CUDA-compatible. This is convenient for the `TrainingPipeline`
especially.
Example:
a) Without GPU:
```
OrchestratorGCPBackend(
project='my_project_id',
cloudsql_connection_name='my_project_id:my_region:conn_name'
machine_type='e2-medium'
)
```
In the above case, a smaller `image` is used for the
orchestration of the pipeline, so the load-up time is faster. Use
for smaller datasets or where a GPU is not required for speed-up
training.
b) With GPU:
```
OrchestratorGCPBackend(
project='my_project_id',
cloudsql_connection_name='my_project_id:my_region:conn_name'
machine_type='n1-standard-4',
gpu='nvidia-tesla-k80',
)
```
Here, a large `image` is used for orchestration of the pipeline.
The attached k80 GPU is leveraged for faster training. Note that
not all machine_type are compatible with attached GPU! Make sure
to check Google Cloud Platform documentation for a full list.
Args:
project: GCP project_id.
cloudsql_connection_name: Cloud SQL instance name in the form
{GCP_PROJECT}:{GCP_REGION}:{GCP_CLOUD_SQL_INSTANCE_NAME}
gpu: (optional) GPU type to attach to VM. If gpu is specified,
default `image` and `source_disk_image` are both modified. Full
list of options [here](
https://cloud.google.com/compute/docs/gpus/create-vm-with-gpus
#gcloud_1)
zone: The zone where VM is launched.
instance_name: Name of the instance.
disk_size: Size (in GB) of disk to be used.
            preemptible: Set True to use a preemptible instance for reduced costs.
image: The image in which the pipeline actually runs.
machine_type: VM Machine type. Full list [here](
https://cloud.google.com/compute/docs/machine-types)
source_disk_image: The image of the underlying VM.
service_account: Optional path to service account json file.
"""
self.project = project
self.cloudsql_connection_name = cloudsql_connection_name
self.zone = zone
self.image = image
self.machine_type = machine_type
self.preemptible = preemptible
if instance_name is None:
self.instance_name = 'zenml-' + str(int(time.time()))
else:
self.instance_name = instance_name
self.gpu = gpu
self.gpu_count = gpu_count
if service_account:
scopes = ['https://www.googleapis.com/auth/cloud-platform']
self.credentials = \
sa.Credentials.from_service_account_file(
service_account, scopes=scopes)
else:
self.credentials = None
# Resolve images based on GPU
if image is None:
# use gpu image if a gpu is attached
if self.gpu is None:
self.image = ZENML_BASE_IMAGE_NAME
else:
self.image = ZENML_TRAINER_IMAGE_NAME
if source_disk_image is None:
compute = self._get_compute()
if self.gpu is None:
# get latest image from cos-85-lts family. As of Jan 28 2021
# it is: cos-85-13310-1041-38
image_response = compute.images().getFromFamily(
project='cos-cloud',
family='cos-85-lts').execute()
source_disk_image = image_response['selfLink']
else:
# get latest image from common-dl family. As of Jan 28 2021
# it is: 'c0-deeplearning-common-cu110-v20210121-debian-10'
image_response = compute.images().getFromFamily(
project='deeplearning-platform-release',
family='common-cu110').execute()
source_disk_image = image_response['selfLink']
self.source_disk_image = source_disk_image
self.disk_size = disk_size
super().__init__(
project=project,
cloudsql_connection_name=cloudsql_connection_name,
image=image,
zone=zone,
instance_name=instance_name,
machine_type=machine_type,
preemptible=preemptible,
service_account=service_account,
gpu=gpu,
disk_size=disk_size,
source_disk_image=source_disk_image,
gpu_count=gpu_count,
**kwargs,
)
def launch_instance(self, config: Dict[Text, Any]):
"""
This function launches a GCP compute instance.
Args:
config: a ZenML config dict
"""
# Instantiate google compute service
# Configure the machine
machine_type = f"zones/{self.zone}/machineTypes/{self.machine_type}"
s_script_name = 'startup-script-gpu.sh' if self.gpu else \
'startup-script.sh'
startup_script = open(os.path.join(
os.path.dirname(__file__), s_script_name), 'r').read()
config_encoded = base64.b64encode(json.dumps(config).encode())
c_params = f'python -m {GCP_ENTRYPOINT} run_pipeline --config_b64 ' \
f'{config_encoded}'
compute_config = {
"kind": "compute#instance",
'name': self.instance_name,
'zone': f'projects/{self.project}/zones/{self.zone}',
'machineType': machine_type,
"displayDevice": {
"enableDisplay": False
},
            # Specify if preemptible
'scheduling': {'preemptible': self.preemptible},
# Specify the boot disk and the image to use as a source.
'disks': [
{
"kind": "compute#attachedDisk",
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': self.source_disk_image,
'diskSizeGb': str(self.disk_size)
}
}
],
# Specify a network interface with NAT to access the public
# internet.
'networkInterfaces': [{
"kind": "compute#networkInterface",
'network': 'global/networks/default',
'accessConfigs': [
{"kind": "compute#accessConfig", 'type': 'ONE_TO_ONE_NAT',
'name': 'External NAT'}
]
}],
# Allow the instance to access cloud storage and logging.
"serviceAccounts": [
{
"email": "default",
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/sqlservice.admin"
]
}
],
# Metadata is readable from the instance and allows you to
# pass configuration from deployment scripts to instances.
'metadata': {
"kind": "compute#metadata",
'items': [
{
# Startup script is automatically executed by the
# instance upon startup.
'key': 'startup-script',
'value': startup_script
},
{
'key': 'image_name',
'value': self.image,
},
{
'key': 'container_params',
'value': c_params,
},
{
'key': 'mlmd_target',
'value': self.cloudsql_connection_name,
}
]
}
}
if self.gpu:
compute_config["guestAccelerators"] = [
{
"acceleratorCount": self.gpu_count,
"acceleratorType":
f"projects/{self.project}/zones/{self.zone}"
f"/acceleratorTypes/{self.gpu}"
}
]
logger.info(
f'Launching instance {self.instance_name} of type '
f'{self.machine_type} in project: {self.project} in zone '
f'{self.zone}')
try:
compute = self._get_compute()
res = compute.instances().insert(
project=self.project,
zone=self.zone,
body=compute_config).execute()
except Exception as e:
raise AssertionError(f"GCP VM failed to launch with the following "
f"error: {str(e)}")
logger.info(f'Launched instance {self.instance_name} with ID: '
f'{res["targetId"]}')
log_link = \
f'https://console.cloud.google.com/logs/query;query=logName%3D' \
f'%22projects%2F{self.project}%2Flogs%2Fgcplogs-docker-driver%22' \
f'%0Aresource.labels.instance_id%3D%22' \
f'{res["targetId"]}%22?' \
f'project={self.project}&folder=true&query=%0A'
logger.info(f"View logs at: {log_link}")
return res
def run(self, config: Dict[Text, Any]):
"""
This run function essentially calls an underlying TFX orchestrator run.
However it is meant as a higher level abstraction with some
opinionated decisions taken.
Args:
config: a ZenML config dict
"""
# Extract the paths to create the tar
logger.info('Orchestrating pipeline on GCP..')
repo: Repository = Repository.get_instance()
repo_path = repo.path
config_dir = repo.zenml_config.config_dir
tar_file_name = \
f'{EXTRACTED_TAR_DIR_NAME}_{str(int(time.time()))}.tar.gz'
path_to_tar = os.path.join(config_dir, tar_file_name)
# Create tarfile but exclude .zenml folder if exists
path_utils.create_tarfile(repo_path, path_to_tar)
logger.info(f'Created tar of current repository at: {path_to_tar}')
# Upload tar to artifact store
store_path = config[keys.GlobalKeys.ARTIFACT_STORE]
store_staging_area = os.path.join(store_path, STAGING_AREA)
store_path_to_tar = os.path.join(store_staging_area, tar_file_name)
path_utils.copy(path_to_tar, store_path_to_tar)
logger.info(f'Copied tar to artifact store at: {store_path_to_tar}')
# Remove tar
path_utils.rm_dir(path_to_tar)
logger.info(f'Removed tar at: {path_to_tar}')
# Append path of tar in config orchestrator utils
config[keys.GlobalKeys.BACKEND][keys.BackendKeys.ARGS][
TAR_PATH_ARG] = store_path_to_tar
# Launch the instance
self.launch_instance(config)
def _get_compute(self):
return googleapiclient.discovery.build(
'compute', 'v1', credentials=self.credentials)
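# --- Illustrative configuration sketch (added; values below are assumptions) ---
# This only uses the constructor defined above.  The project id, Cloud SQL
# connection name and zone are placeholder values; how the backend is then
# attached to a pipeline depends on the surrounding ZenML version and is
# intentionally not shown here.
def _example_backend():
    return OrchestratorGCPBackend(
        project='my-gcp-project',                                            # hypothetical project id
        cloudsql_connection_name='my-gcp-project:europe-west1:zenml-mlmd',   # hypothetical Cloud SQL connection
        machine_type='n1-standard-4',
        gpu='nvidia-tesla-k80',   # attaching a GPU switches to the CUDA-compatible image
        gpu_count=1,
        zone='europe-west1-b',
        preemptible=True,
    )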
| 38.762295 | 79 | 0.573835 |
8b69230585b02cbdbd192264ae906882409c5383 | 800 | py | Python |
api/migrations/0001_initial.py | mikolajczykb/groove-rooms | 720d59fe91338ffe4096d07cfbad700c957b4415 | ["MIT"] | null | null | null |
api/migrations/0001_initial.py | mikolajczykb/groove-rooms | 720d59fe91338ffe4096d07cfbad700c957b4415 | ["MIT"] | null | null | null |
api/migrations/0001_initial.py | mikolajczykb/groove-rooms | 720d59fe91338ffe4096d07cfbad700c957b4415 | ["MIT"] | null | null | null |
# Generated by Django 3.1.1 on 2021-02-19 13:00
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(default='LkONM23p', max_length=8, unique=True)),
('host', models.CharField(max_length=50, unique=True)),
('guest_can_pause', models.BooleanField(default=False)),
('votes_to_skip', models.IntegerField(default=1)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
]
| 30.769231 | 114 | 0.5875 |
0da52c2342d23c295bf4f3ec5e1fc9e764afe12c | 1,128 | py | Python |
tests/usecases/test_room_list_use_case.py | dbaeza24/mastering_python | 7907459928dc886334f2ef6032b373fd4c1e0cfd | ["MIT"] | null | null | null |
tests/usecases/test_room_list_use_case.py | dbaeza24/mastering_python | 7907459928dc886334f2ef6032b373fd4c1e0cfd | ["MIT"] | null | null | null |
tests/usecases/test_room_list_use_case.py | dbaeza24/mastering_python | 7907459928dc886334f2ef6032b373fd4c1e0cfd | ["MIT"] | 1 | 2020-08-09T22:41:22.000Z | 2020-08-09T22:41:22.000Z |
import pytest
import uuid
from unittest import mock
from rentomatic.domain import room as r
from rentomatic.use_cases import room_list_use_case as uc
@pytest.fixture
def domain_rooms():
room_1 = r.Room(
code = uuid.uuid4(),
size=215,
price=39,
longitude=-0.09998975,
latitude=51.75436293,
)
room_2 = r.Room(
code = uuid.uuid4(),
size=405,
price=66,
longitude=0.182280006,
latitude=51.74640997,
)
room_3 = r.Room(
code = uuid.uuid4(),
size=56,
price=60,
longitude=-0.27891577,
latitude=51.45994096,
)
room_4 = r.Room(
code = uuid.uuid4(),
size=93,
price=48,
longitude=-0.33894476,
latitude=51.3996678,
)
return [room_1, room_2, room_3, room_4,]
def test_room_list_without_parameters(domain_rooms):
repo = mock.Mock()
repo.list.return_value = domain_rooms
room_list_use_case = uc.RoomListUseCase(repo)
result = room_list_use_case.execute()
repo.list.assert_called_with()
assert result == domain_rooms
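# --- Illustrative sketch (assumption): the use case implied by this test ---
# The test above only requires that RoomListUseCase.execute() returns repo.list();
# a minimal implementation consistent with it would look like this (the real
# rentomatic implementation may differ):
#
# class RoomListUseCase:
#     def __init__(self, repo):
#         self.repo = repo
#
#     def execute(self):
#         return self.repo.list()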
| 23.020408 | 57 | 0.613475 |
8a89cc27e321dc156b751b6fd3f064b89c18da13 | 3,554 | py | Python |
back/investSimulator/users/serializers.py | Gabydelgado/Invest-Simulator | 120706d0d48146b06e202df58403e5ea23cf53f3 | ["MIT"] | null | null | null |
back/investSimulator/users/serializers.py | Gabydelgado/Invest-Simulator | 120706d0d48146b06e202df58403e5ea23cf53f3 | ["MIT"] | null | null | null |
back/investSimulator/users/serializers.py | Gabydelgado/Invest-Simulator | 120706d0d48146b06e202df58403e5ea23cf53f3 | ["MIT"] | null | null | null |
from django.contrib.auth import get_user_model
from rest_auth.registration.serializers import RegisterSerializer
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from . import models
try:
from allauth.account import app_settings as allauth_settings
from allauth.utils import (email_address_exists,
get_username_max_length)
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount.providers.base import AuthProcess
except ImportError:
raise ImportError("allauth needs to be added to INSTALLED_APPS.")
UserModel = get_user_model()
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = models.CustomUser
fields = ('id', 'username', 'avatar', 'cash', 'rank')
class UserProfileSerializer(serializers.ModelSerializer):
def update(self, user, validated_data):
user.email = validated_data.get('email', user.email)
user.first_name = validated_data.get('first_name', user.first_name)
user.last_name = validated_data.get('last_name', user.last_name)
user.avatar = validated_data.get('avatar', user.avatar)
user.save()
return user
class Meta:
model = models.CustomUser
fields = ('email', 'first_name', 'last_name', 'avatar',)
class UserDetailsSerializer(serializers.ModelSerializer):
"""
User model w/o password
"""
class Meta:
model = UserModel
fields = ('pk', 'username', 'avatar', 'cash', 'rank')
class TokenSerializer(serializers.ModelSerializer):
"""
Serializer for Token model.
"""
user = UserSerializer(many=False, read_only=True)
class Meta:
model = Token
fields = ('key', 'user',)
class MyRegisterSerializer(RegisterSerializer):
first_name = serializers.CharField(required=True, write_only=True)
last_name = serializers.CharField(required=True, write_only=True)
avatar = serializers.CharField(required=True, write_only=True)
username = serializers.CharField(
max_length=get_username_max_length(),
min_length=allauth_settings.USERNAME_MIN_LENGTH,
required=allauth_settings.USERNAME_REQUIRED
)
def get_cleaned_data(self):
return {
'first_name': self.validated_data.get('first_name', ''),
'username': self.validated_data.get('username', ''),
'last_name': self.validated_data.get('last_name', ''),
'password1': self.validated_data.get('password1', ''),
'email': self.validated_data.get('email', ''),
'avatar': self.validated_data.get('avatar', ''),
}
def custom_signup(self, request, user):
user.username = self.validated_data.get('username', '')
user.first_name = self.validated_data.get('first_name', '')
user.last_name = self.validated_data.get('last_name', '')
user.avatar = self.validated_data.get('avatar', '')
user.save(update_fields=['first_name', 'last_name', 'avatar'])
class RankingSerializer(serializers.Serializer):
"""La serializacion de ranking"""
username = serializers.CharField(max_length=None, min_length=None,
allow_blank=False, trim_whitespace=True)
wallet_quote = serializers.DecimalField(max_digits=10, decimal_places=2)
| 36.639175 | 77 | 0.689645 |
b12d39157c641ecc31aad9158baa850c87681ebd | 315 | py | Python |
packages/w3af/w3af/plugins/auth/__init__.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | ["Apache-2.0"] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z |
tools/w3af/w3af/plugins/auth/__init__.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | ["MIT"] | null | null | null |
tools/w3af/w3af/plugins/auth/__init__.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | ["MIT"] | null | null | null |
def get_long_description():
"""
:return: The description for the plugin type.
"""
return """Auth plugins make possible to scan authorization protected web applications.
They make login action in the beginning of the scan, logout - in the end
and check current session action regularly."""
| 31.5 | 90 | 0.711111 |
137a84faaff37a19da01085b232641f39bb43ee6 | 4,277 | py | Python |
test/tet_word_random_embedding.py | tolecy/Keras-TextClassification | 34746c01ed3976deea108937e023bc6cd4037473 | ["MIT"] | 1 | 2019-08-24T09:50:15.000Z | 2019-08-24T09:50:15.000Z |
test/tet_word_random_embedding.py | tolecy/Keras-TextClassification | 34746c01ed3976deea108937e023bc6cd4037473 | ["MIT"] | null | null | null |
test/tet_word_random_embedding.py | tolecy/Keras-TextClassification | 34746c01ed3976deea108937e023bc6cd4037473 | ["MIT"] | null | null | null |
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/6/3 10:51
# @author :Mo
# @function :train of textcnn-word-random with baidu-qa-2019 in question title
# Make paths work on Linux
import pathlib
import sys
import os
project_path = str(pathlib.Path(os.path.abspath(__file__)).parent.parent.parent)
sys.path.append(project_path)
# Paths
from keras_textclassification.conf.path_config import path_model, path_fineture, path_model_dir, path_hyper_parameters
# Train/validation data paths
from keras_textclassification.conf.path_config import path_baidu_qa_2019_train, path_baidu_qa_2019_valid
# Data preprocessing and deletion of files under a directory
from keras_textclassification.data_preprocess.text_preprocess import PreprocessText, delete_file
# Model graph
from keras_textclassification.m02_TextCNN.graph import TextCNNGraph as Graph
# Timing
import time
def train(hyper_parameters=None, rate=1.0):
if not hyper_parameters:
hyper_parameters = {
            'len_max': 50,  # max sentence length; 20-50 is recommended; longer inputs make BERT slower and use more memory; 20 is fine on a local 4 GB Win10 machine; too large risks OOM
            'embed_size': 300,  # char/word embedding size; 768 for BERT, 300 for word, smaller for char
            'vocab_size': 20000,  # placeholder value; it is overwritten by the code
            'trainable': True,  # whether the embedding is static or trainable, i.e. whether it can be fine-tuned
            'level_type': 'word',  # granularity, the smallest unit, 'char' or 'word'; note: in word2vec mode the training corpus must be tokenized first
            'embedding_type': 'random',  # embedding type: 'random', 'bert' or 'word2vec'
            'gpu_memory_fraction': 0.66,  # GPU memory fraction
            'model': {'label': 17,  # number of classes
                      'batch_size': 32,  # batch size; in principle the larger the better, especially with imbalanced samples; this setting matters a lot
                      'dropout': 0.5,  # dropout probability
                      'decay_step': 100,  # learning-rate decay step; decay once every N steps
                      'decay_rate': 0.9,  # learning-rate decay factor (multiplicative)
                      'epochs': 20,  # maximum number of training epochs
                      'patience': 3,  # early-stopping patience; 2-3 is enough
                      'lr': 1e-3,  # learning rate; 5e-5 for BERT, 1e-3 otherwise; it has a large impact on training, tune it if accuracy stalls
                      'l2': 1e-9,  # L2 regularization
                      'activate_classify': 'softmax',  # activation of the final classification layer
                      'loss': 'categorical_crossentropy',  # loss function
                      'metrics': 'accuracy',  # metric used to decide whether a better model is saved
                      'is_training': True,  # training mode vs. test mode
                      'model_path': path_model,
                      # model path; saving is triggered when the loss improves, save_best_only=True, save_weights_only=True
                      'path_hyper_parameters': path_hyper_parameters,  # path of the hyperparameters (including the embedding)
                      'path_fineture': path_fineture,  # path for saving the trainable embedding, e.g. char/word/BERT vectors
                      },
            'embedding': {'layer_indexes': [12],  # which BERT layers to take
                          # 'corpus_path': '',  # path of the pretrained embedding data; if unset, the default from conf is used. keras-bert can load Google BERT, Baidu ERNIE (needs conversion, https://github.com/ArthurRizar/tensorflow_ernie) and HIT BERT-wwm (TF, https://github.com/ymcui/Chinese-BERT-wwm)
                          },
            'data': {'train_data': path_baidu_qa_2019_train,  # training data
                     'val_data': path_baidu_qa_2019_valid  # validation data
                     },
}
    # Delete any previously saved model and fine-tuned embedding files
delete_file(path_model_dir)
time_start = time.time()
    # Initialize the graph
graph = Graph(hyper_parameters)
print("graph init ok!")
ra_ed = graph.word_embedding
    # Data preprocessing
pt = PreprocessText()
x_train, y_train = pt.preprocess_label_ques_to_idx(hyper_parameters['embedding_type'],
hyper_parameters['data']['train_data'],
ra_ed, rate=rate, shuffle=True)
x_val, y_val = pt.preprocess_label_ques_to_idx(hyper_parameters['embedding_type'],
hyper_parameters['data']['val_data'],
ra_ed, rate=rate, shuffle=True)
print("data propress ok!")
print(len(y_train))
# 训练
graph.fit(x_train, y_train, x_val, y_val)
print("耗时:" + str(time.time()-time_start))
if __name__=="__main__":
train(rate=0.01)
# Note: on a 4 GB 080Ti GPU under Win10, batch_size=32, len_max=20 and gpu<=0.87 should be enough to fine-tune BERT.
# One epoch over the full data (batch_size=32) already reaches about 80% accuracy on the validation set, which is decent.
# If Win10 reports errors like "failed to allocate 3.56G (3822520832 bytes) from device: CUDA_ERROR_OUT_OF_MEMORY: out of memory", set gpu, len_max and batch_size a bit smaller.
| 47.522222 | 231 | 0.607201 |
9477239a535a35ae430e05de5911a0f915c05e8c | 10,034 | py | Python |
yolo_cam.py | IDayday/YOLOv4_CAM | 8df61f1c59c197126f0385c1ec1cf65a29a80cec | ["Apache-2.0"] | 34 | 2021-04-16T12:38:13.000Z | 2022-03-25T03:00:07.000Z |
yolo_cam.py | IDayday/YOLOv4_CAM | 8df61f1c59c197126f0385c1ec1cf65a29a80cec | ["Apache-2.0"] | 2 | 2021-12-25T14:49:33.000Z | 2021-12-30T01:30:04.000Z |
yolo_cam.py | IDayday/YOLOv4_CAM | 8df61f1c59c197126f0385c1ec1cf65a29a80cec | ["Apache-2.0"] | 6 | 2021-04-17T03:11:13.000Z | 2022-02-09T10:31:25.000Z |
#-------------------------------------#
#   Create the YOLO class
#-------------------------------------#
import colorsys
import os
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from PIL import Image, ImageDraw, ImageFont
from torch.autograd import Variable
from nets.yolo4 import YoloBody
from utils.utils import (DecodeBox, bbox_iou, letterbox_image,
non_max_suppression, yolo_correct_boxes)
#--------------------------------------------#
#   To run prediction with your own trained model you need to change 2 parameters:
#   both model_path and classes_path must be modified!
#   If a shape mismatch occurs, pay close attention to the
#   model_path and classes_path parameters used during training.
#--------------------------------------------#
class YOLO(object):
_defaults = {
"model_path" : 'model_data/Epoch102-Total_Loss11.0130-Val_Loss8.8086.pth',
"anchors_path" : 'model_data/yolo_anchors.txt',
"classes_path" : 'model_data/helmet_classes.txt',
"model_image_size" : (416, 416, 3),
"confidence" : 0.5,
"iou" : 0.3,
"cuda" : False,
#---------------------------------------------------------------------#
        #   This flag controls whether letterbox_image is used to resize the input image without distortion.
        #   After repeated tests, disabling letterbox_image and resizing directly gives better results.
#---------------------------------------------------------------------#
"letterbox_image" : False,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
#---------------------------------------------------#
    #   Initialize YOLO
#---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.generate()
#---------------------------------------------------#
    #   Get all the class names
#---------------------------------------------------#
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
#---------------------------------------------------#
    #   Get all the anchor boxes
#---------------------------------------------------#
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape([-1, 3, 2])[::-1,:,:]
#---------------------------------------------------#
    #   Build the model
#---------------------------------------------------#
def generate(self):
#---------------------------------------------------#
        #   Build the YOLOv4 model
#---------------------------------------------------#
self.net = YoloBody(len(self.anchors[0]), len(self.class_names)).eval()
#---------------------------------------------------#
        #   Load the YOLOv4 model weights
#---------------------------------------------------#
print('Loading weights into state dict...')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
state_dict = torch.load(self.model_path, map_location=device)
self.net.load_state_dict(state_dict)
print('Finished!')
if self.cuda:
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
self.net = nn.DataParallel(self.net)
self.net = self.net.cuda()
#---------------------------------------------------#
        #   Build the decoding utilities for the three feature layers
#---------------------------------------------------#
self.yolo_decodes = []
for i in range(3):
self.yolo_decodes.append(DecodeBox(self.anchors[i], len(self.class_names), (self.model_image_size[1], self.model_image_size[0])))
print('{} model, anchors, and classes loaded.'.format(self.model_path))
        # Assign a different color to each class for drawing boxes
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
#---------------------------------------------------#
    #   Detect objects in an image
#---------------------------------------------------#
def detect_image(self, image):
image_shape = np.array(np.shape(image)[0:2])
#---------------------------------------------------------#
        #   Add gray padding bars to the image for a distortion-free resize.
        #   Alternatively, resize directly and run recognition on that.
#---------------------------------------------------------#
if self.letterbox_image:
crop_img = np.array(letterbox_image(image, (self.model_image_size[1],self.model_image_size[0])))
else:
crop_img = image.convert('RGB')
crop_img = crop_img.resize((self.model_image_size[1],self.model_image_size[0]), Image.BICUBIC)
photo = np.array(crop_img,dtype = np.float32) / 255.0
photo = np.transpose(photo, (2, 0, 1))
#---------------------------------------------------------#
        #   Add the batch_size dimension
#---------------------------------------------------------#
images = [photo]
with torch.no_grad():
images = torch.from_numpy(np.asarray(images))
if self.cuda:
images = images.cuda()
#---------------------------------------------------------#
            #   Feed the image into the network for prediction!
#---------------------------------------------------------#
outputs = self.net(images)
output_list = []
for i in range(3):
output_list.append(self.yolo_decodes[i](outputs[i]))
return output_list
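# --- Note (added for clarity) ---
# output_list holds the decoded predictions of the three YOLO heads.  For the
# default 416x416 input the feature maps are 13x13, 26x26 and 52x52 (downsampling
# strides 32, 16 and 8), which matches the grid sizes listed in the `stride`
# variable at the bottom of this file.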
def show_CAM(image_path, feature_maps, class_id, all_ids=10, show_one_layer=True):
"""
feature_maps: this is a list [tensor,tensor,tensor], tensor shape is [1, 3, N, N, all_ids]
"""
SHOW_NAME = ["score", "class", "class_score"]
img_ori = cv2.imread(image_path)
layers0 = feature_maps[0].reshape([-1, all_ids])
layers1 = feature_maps[1].reshape([-1, all_ids])
layers2 = feature_maps[2].reshape([-1, all_ids])
layers = torch.cat([layers0, layers1, layers2], 0)
score_max_v = layers[:, 4].max() # compute max of score from all anchor
score_min_v = layers[:, 4].min() # compute min of score from all anchor
class_max_v = layers[:, 5 + class_id].max() # compute max of class from all anchor
class_min_v = layers[:, 5 + class_id].min() # compute min of class from all anchor
all_ret = [[],[],[]]
for j in range(3): # layers
layer_one = feature_maps[j]
# compute max of score from three anchor of the layer
anchors_score_max = layer_one[0, ..., 4].max(0)[0]
# compute max of class from three anchor of the layer
anchors_class_max = layer_one[0, ..., 5 + class_id].max(0)[0]
scores = ((anchors_score_max - score_min_v) / (
score_max_v - score_min_v))
classes = ((anchors_class_max - class_min_v) / (
class_max_v - class_min_v))
layer_one_list = []
layer_one_list.append(scores)
layer_one_list.append(classes)
layer_one_list.append(scores*classes)
for idx, one in enumerate(layer_one_list):
layer_one = one.cpu().numpy()
ret = ((layer_one - layer_one.min()) / (layer_one.max() - layer_one.min())) * 255
ret = ret.astype(np.uint8)
gray = ret[:, :, None]
ret = cv2.applyColorMap(gray, cv2.COLORMAP_JET)
if not show_one_layer:
all_ret[j].append(cv2.resize(ret, (img_ori.shape[1], img_ori.shape[0])).copy())
else:
ret = cv2.resize(ret, (img_ori.shape[1], img_ori.shape[0]))
show = ret * 0.8 + img_ori * 0.2
show = show.astype(np.uint8)
cv2.imshow(f"one_{SHOW_NAME[idx]}", show)
cv2.imwrite('./cam_results/head'+str(j)+'layer'+str(idx)+SHOW_NAME[idx]+".jpg", show)
# cv2.imshow(f"map_{SHOW_NAME[idx]}", ret)
if show_one_layer:
cv2.waitKey(0)
if not show_one_layer:
for idx, one_type in enumerate(all_ret):
map_show = one_type[0] / 3 + one_type[1] / 3 + one_type[2] / 3
show = map_show * 0.8 + img_ori * 0.2
show = show.astype(np.uint8)
map_show = map_show.astype(np.uint8)
cv2.imshow(f"all_{SHOW_NAME[idx]}", show)
cv2.imwrite('./cam_results/head_cont'+str(idx)+SHOW_NAME[idx]+".jpg", show)
# cv2.imshow(f"map_{SHOW_NAME[idx]}", map_show)
cv2.waitKey(0)
ret = []
stride = [13,26,52]
yolo = YOLO()
path = 'img/00148.jpg'
image = Image.open(path)
output_list = yolo.detect_image(image)
for i,f in enumerate(output_list):
ret.append(f.reshape(1,3,stride[i],stride[i],10))
# features1 = torch.randn(1,3,13,13,10)
# features2 = torch.randn(1,3,26,26,10)
# features3 = torch.randn(1,3,52,52,10)
show_CAM(path, ret, 1)
| 43.437229 | 143 | 0.454654 |
6993dd59788e3130067a159e016e47163be29557 | 1,626 | py | Python |
share/rpcauth/rpcauth.py | elestranobaron/litecoin | 1757dde281649c24321c528ef79680897b7ce094 | ["MIT"] | null | null | null |
share/rpcauth/rpcauth.py | elestranobaron/litecoin | 1757dde281649c24321c528ef79680897b7ce094 | ["MIT"] | null | null | null |
share/rpcauth/rpcauth.py | elestranobaron/litecoin | 1757dde281649c24321c528ef79680897b7ce094 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
"""Create size byte hex salt"""
return hexlify(urandom(size)).decode()
def generate_password():
"""Create 32 byte b64 password"""
return urlsafe_b64encode(urandom(32)).decode('utf-8')
def password_to_hmac(salt, password):
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
return m.hexdigest()
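# Illustrative sketch (added, not part of the original script): verifying a
# password against the salt and HMAC emitted by this tool, e.g. on the server
# side.  It only reuses password_to_hmac() above and the already-imported hmac module.
def verify_password(salt, password, expected_hmac):
    recomputed = password_to_hmac(salt, password)
    return hmac.compare_digest(recomputed, expected_hmac)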
def main():
parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
parser.add_argument('username', help='the username for authentication')
parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
args = parser.parse_args()
if not args.password:
args.password = generate_password()
elif args.password == '-':
args.password = getpass()
# Create 16 byte hex salt
salt = generate_salt(16)
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to muscleupcoin.conf:')
print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
print('Your password:\n{0}'.format(args.password))
if __name__ == '__main__':
main()
| 34.595745 | 135 | 0.694957 |
fdb48a1809265afbf2b611983a4ab7a8ea6e659a | 20,372 | py | Python |
model.py | smarsu/mtcnn | 98c3839e250b18c310efa920bc6289a70379f07d | ["MIT"] | null | null | null |
model.py | smarsu/mtcnn | 98c3839e250b18c310efa920bc6289a70379f07d | ["MIT"] | null | null | null |
model.py | smarsu/mtcnn | 98c3839e250b18c310efa920bc6289a70379f07d | ["MIT"] | null | null | null |
# --------------------------------------------------------
# MTCNN
# Licensed under The MIT License [see LICENSE for details]
# Copyright 2019 smarsu. All Rights Reserved.
# --------------------------------------------------------
"""Implementation of MTCNN in SMNet.
Reference:
Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Networks
https://arxiv.org/abs/1604.02878
"""
import numpy as np
import smnet as sm
import tensorflow as tf
def pnet(x):
"""PNet in MTCNN.
Args:
x: The input tensor. (n, h, w, 3)
Returns:
conf: The output confidence tensor.
box: The output box tensor.
landmark: The output landmark tensor.
"""
# smnet-gpu only support NCHW in conv and pool
# smnet-cpu only support NHWC in conv and pool
if sm.net.use_cuda:
x = sm.transform(x, 'NHWC2NCHW')
x = sm.slim.conv2d(x, 3, 10, 3, 1, padding='VALID')
x = sm.max_pool(x, (1, 3, 3, 1), (1, 2, 2, 1), padding='SAME')
x = sm.slim.conv2d(x, 10, 16, 3, 1, padding='VALID')
x = sm.slim.conv2d(x, 16, 32, 3, 1, padding='VALID')
x = sm.slim.conv2d(x, 32, 16, 1, 1, padding='VALID', act=None)
if sm.net.use_cuda:
x = sm.transform(x, 'NCHW2NHWC')
conf, box, landmark = sm.split(x, (2, 6, 16), axis=-1)
return conf, box, landmark
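# Note (added for clarity): with the VALID 3x3 convolutions and the stride-2 SAME
# pooling above, an input of height h produces an output height of
# ceil((h - 2) / 2) - 4, e.g. h = 12 -> 1.  This is the same formula used for
# subh/subw in the test code at the bottom of this file.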
def pnet_loss(conf, box, landmark, gt_conf, gt_box, gt_landmark, conf_mask,
box_mask, landmark_mask):
"""Create the loss of pnet.
Args:
conf, box, landmark: The output of pnet.
gt_conf, gt_box, gt_landmark: ground truth labels.
conf_mask, box_mask, landmark_mask: to balance the three loss
"""
conf_loss = conf_mask * sm.softmax_cross_entropy_with_logits(labels=gt_conf,
logits=conf)
box_loss = box_mask * sm.hse(gt_box, box)
landmark_loss = landmark_mask * sm.hse(gt_landmark, landmark)
return conf_loss, box_loss, landmark_loss
def rnet(x):
"""RNet in MTCNN.
Args:
x: The input tensor. (n, 24, 24, 3)
"""
if sm.net.use_cuda:
x = sm.transform(x, 'NHWC2NCHW')
x = sm.slim.conv2d(x, 3, 28, 3, 1, padding='VALID', border=0.01)
x = sm.max_pool(x, (1, 3, 3, 1), (1, 2, 2, 1), padding='SAME')
x = sm.slim.conv2d(x, 28, 48, 3, 1, padding='VALID', border=0.01)
x = sm.max_pool(x, (1, 3, 3, 1), (1, 2, 2, 1), padding='VALID')
x = sm.slim.conv2d(x, 48, 64, 2, 1, padding='VALID', border=0.01)
# use conv2d to replace full-connect
x = sm.slim.conv2d(x, 64, 128, 3, 1, padding='VALID', border=0.01)
x = sm.slim.conv2d(x, 128, 16, 1, 1, padding='VALID', act=None, border=0.01)
if sm.net.use_cuda:
x = sm.transform(x, 'NCHW2NHWC')
#x = sm.reshape(x, (-1, 3 * 3 * 64))
#x = sm.slim.fc(x, 3 * 3 * 64, 128, bias=True)
#x = sm.slim.fc(x, 128, 16, bias=True, act=None)
x = sm.reshape(x, (-1, 16))
conf, box, landmark = sm.split(x, (2, 6, 16), axis=-1)
return conf, box, landmark
def rnet_loss(conf, box, landmark, gt_conf, gt_box, gt_landmark, conf_mask,
box_mask, landmark_mask):
"""Create the loss of rnet.
Args:
conf, box, landmark: The output of pnet.
gt_conf, gt_box, gt_landmark: ground truth labels.
conf_mask, box_mask, landmark_mask: to balance the three loss
"""
conf_loss = conf_mask * sm.softmax_cross_entropy_with_logits(labels=gt_conf,
logits=conf)
#conf_loss = conf_mask * sm.hse(gt_conf, conf)
box_loss = box_mask * sm.hse(gt_box, box)
landmark_loss = landmark_mask * sm.hse(gt_landmark, landmark)
return conf_loss, box_loss, landmark_loss
def onet(x):
"""ONet in MTCNN.
Args:
x: The input tensor. (n, 48, 48, 3)
"""
if sm.net.use_cuda:
x = sm.transform(x, 'NHWC2NCHW')
x = sm.slim.conv2d(x, 3, 32, 3, 1, padding='VALID')
x = sm.max_pool(x, (1, 3, 3, 1), (1, 2, 2, 1), padding='SAME')
x = sm.slim.conv2d(x, 32, 64, 3, 1, padding='VALID')
x = sm.max_pool(x, (1, 3, 3, 1), (1, 2, 2, 1), padding='VALID')
x = sm.slim.conv2d(x, 64, 64, 3, 1, padding='VALID')
x = sm.max_pool(x, (1, 2, 2, 1), (1, 2, 2, 1), padding='SAME')
x = sm.slim.conv2d(x, 64, 128, 2, 1, padding='VALID')
# use conv2d to replace full-connect
x = sm.slim.conv2d(x, 128, 256, 3, 1, padding='VALID')
x = sm.slim.conv2d(x, 256, 16, 1, 1, padding='VALID', act=None)
if sm.net.use_cuda:
x = sm.transform(x, 'NCHW2NHWC')
x = sm.reshape(x, (-1, 16))
conf, box, landmark = sm.split(x, (2, 6, 16), axis=-1)
return conf, box, landmark
def onet_loss(conf, box, landmark, gt_conf, gt_box, gt_landmark, conf_mask,
box_mask, landmark_mask):
"""Create the loss of onet.
Args:
conf, box, landmark: The output of rnet.
gt_conf, gt_box, gt_landmark: ground truth labels.
conf_mask, box_mask, landmark_mask: to balance the three loss
"""
conf_loss = conf_mask * sm.softmax_cross_entropy_with_logits(labels=gt_conf,
logits=conf)
box_loss = box_mask * sm.hse(gt_box, box)
landmark_loss = landmark_mask * sm.hse(gt_landmark, landmark)
return conf_loss, box_loss, landmark_loss
def slim_conv2d(x, ci, co, filter_size, stride, padding, bias=True,
act=tf.nn.relu):
weight = np.random.normal(0, 0.01, (co, ci, filter_size, filter_size))
weight = np.transpose(weight, (2, 3, 1, 0))
weight = tf.Variable(weight, dtype=tf.float32)
y = tf.nn.conv2d(x, weight, (1, stride, stride, 1), padding=padding)
if bias:
bias = tf.Variable(np.zeros(shape=(co, )), dtype=tf.float32)
y += bias
if act:
y = act(y)
return y
def tf_pnet(x):
"""The same as `pnet`."""
np.random.seed(196)
x = slim_conv2d(x, 3, 10, 3, 1, padding='VALID')
x = tf.nn.max_pool(x, (1, 3, 3, 1), (1, 2, 2, 1), padding='SAME')
x = slim_conv2d(x, 10, 16, 3, 1, padding='VALID')
x = slim_conv2d(x, 16, 32, 3, 1, padding='VALID')
x = slim_conv2d(x, 32, 16, 1, 1, padding='VALID', act=None)
conf, box, landmark = tf.split(x, (2, 4, 10), axis=-1)
return conf, box, landmark
def tf_rnet(x):
"""The same as rnet."""
np.random.seed(196)
x = slim_conv2d(x, 3, 28, 3, 1, padding='VALID')
x = tf.nn.max_pool(x, (1, 3, 3, 1), (1, 2, 2, 1), padding='SAME')
x = slim_conv2d(x, 28, 48, 3, 1, padding='VALID')
x = tf.nn.max_pool(x, (1, 3, 3, 1), (1, 2, 2, 1), padding='VALID')
x = slim_conv2d(x, 48, 64, 2, 1, padding='VALID')
# use conv2d to replace full-connect
x = slim_conv2d(x, 64, 128, 3, 1, padding='VALID')
x = slim_conv2d(x, 128, 16, 1, 1, padding='VALID', act=None)
x = tf.reshape(x, (-1, 16))
conf, box, landmark = tf.split(x, (2, 4, 10), axis=-1)
return conf, box, landmark
def tf_pnet_loss(conf, box, landmark, gt_conf, gt_box, gt_landmark, conf_mask,
box_mask, landmark_mask):
"""The same as `pnet_loss`."""
conf_loss = conf_mask * tf.nn.softmax_cross_entropy_with_logits_v2(labels=gt_conf,
logits=conf)
box_loss = box_mask * 0.5 * tf.square(gt_box - box)
landmark_loss = landmark_mask * 0.5 * tf.square(gt_landmark - landmark)
return conf_loss, box_loss, landmark_loss
def maer(a, b, gt):
a = np.array(a).reshape(-1)
b = np.array(b).reshape(-1)
gt = np.array(gt).reshape(-1)
keep = (a - b) != 0
a = a[keep]
b = b[keep]
gt = gt[keep]
if a.size == 0:
return 0.
smae = np.max(np.abs(a - b) / np.abs(gt))
smae_id = np.argmax(np.abs(a - b) / np.abs(gt))
if smae > thrs:
print(a)
print()
print(b)
print(a[smae_id])
print(b[smae_id])
pass
#raise ValueError('Too large error in func self_max_abs_error: {}, '
# 'the threshold is {}'.format(smae, thrs))
return smae
def check_rnet():
import time
lr = 1/32
np.random.seed(196)
x = sm.Tensor()
gt_conf = sm.Tensor()
gt_box = sm.Tensor()
gt_landmark = sm.Tensor()
conf_mask = sm.Tensor()
box_mask = sm.Tensor()
landmark_mask = sm.Tensor()
conf, box, landmark = rnet(x)
conf_loss, box_loss, landmark_loss = rnet_loss(conf, box, landmark,
gt_conf, gt_box,
gt_landmark, conf_mask,
box_mask, landmark_mask)
# check same shape
epoch = 3
np.random.seed(196)
attrs = []
for _ in range(epoch):
h = 24
w = 24
dx = np.random.uniform(-1, 1, (32, 24, 24, 3))
dconf = np.random.choice([0, 1], (32, 1))
dconf = np.concatenate([dconf, 1-dconf], -1)
dbox = np.random.randn(32, 4)
dlandmark = np.random.randn(32, 10)
dconf_mask = np.random.randn(32)
dbox_mask = np.random.randn(32, 4)
dlandmark_mask = np.random.randn(32, 10)
conf_, box_, landmark_, conf_loss_, box_loss_, landmark_loss_ = sm.forward(
[conf, box, landmark, conf_loss, box_loss, landmark_loss],
{x: dx, gt_conf: dconf, gt_box: dbox, gt_landmark: dlandmark,
conf_mask: np.stack([dconf_mask] * 2, -1), box_mask: dbox_mask, landmark_mask: dlandmark_mask})
sm.optimize([conf_loss, box_loss, landmark_loss], lr=lr)
attrs.append([conf_, box_, landmark_, conf_loss_, box_loss_, landmark_loss_])
with tf.device('/cpu:0'):
x1 = tf.placeholder(tf.float32)
gt_conf1 = tf.placeholder(tf.float32)
gt_box1 = tf.placeholder(tf.float32)
gt_landmark1 = tf.placeholder(tf.float32)
conf_mask1 = tf.placeholder(tf.float32)
box_mask1 = tf.placeholder(tf.float32)
landmark_mask1 = tf.placeholder(tf.float32)
conf1, box1, landmark1 = tf_rnet(x1)
conf_loss1, box_loss1, landmark_loss1 = tf_pnet_loss(conf1, box1, landmark1,
gt_conf1, gt_box1,
gt_landmark1, conf_mask1,
box_mask1, landmark_mask1)
loss = tf.reduce_sum(conf_loss1) + tf.reduce_sum(box_loss1) + tf.reduce_sum(landmark_loss1)
opt = tf.train.GradientDescentOptimizer(lr).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
np.random.seed(196)
epoch = 3
attrs1 = []
for _ in range(epoch):
h = 24
w = 24
dx = np.random.uniform(-1, 1, (32, 24, 24, 3))
dconf = np.random.choice([0, 1], (32, 1))
dconf = np.concatenate([dconf, 1-dconf], -1)
dbox = np.random.randn(32, 4)
dlandmark = np.random.randn(32, 10)
dconf_mask = np.random.randn(32)
dbox_mask = np.random.randn(32, 4)
dlandmark_mask = np.random.randn(32, 10)
_, loss1_, conf1_, box1_, landmark1_, conf_loss1_, box_loss1_, landmark_loss1_ = sess.run(
[opt, loss, conf1, box1, landmark1, conf_loss1, box_loss1, landmark_loss1],
{x1: dx, gt_conf1: dconf, gt_box1: dbox, gt_landmark1: dlandmark,
conf_mask1: dconf_mask, box_mask1: dbox_mask, landmark_mask1: dlandmark_mask})
attrs1.append([loss1_, conf1_, box1_, landmark1_, conf_loss1_, box_loss1_, landmark_loss1_])
print('RNet:')
for (conf_, box_, landmark_, conf_loss_, box_loss_, landmark_loss_), \
(loss1_, conf1_, box1_, landmark1_, conf_loss1_, box_loss1_, landmark_loss1_) \
in zip(attrs, attrs1):
conf_error = maer(conf_, conf1_, conf1_)
box_error = maer(box_, box1_, box1_)
landmark_error = maer(landmark_, landmark1_, landmark1_)
conf_loss_error = maer(np.sum(conf_loss_, -1), conf_loss1_, conf_loss1_)
box_loss_error = maer(box_loss_, box_loss1_, box_loss1_)
landmark_loss_error = maer(landmark_loss_, landmark_loss1_, landmark_loss1_)
print('loss1_:', loss1_)
print('loss_:', np.sum(conf_loss_) + np.sum(box_loss_) + np.sum(landmark_loss_))
print('conf_error:', conf_error)
print('box_error:', box_error)
print('landmark_error:', landmark_error)
print('conf_loss_error:', conf_loss_error)
print('box_loss_error:', box_loss_error)
print('landmark_loss_error:', landmark_loss_error)
if __name__ == '__main__':
"""Check the accuraty of pnet in smnet."""
import time
thrs = 1e-4
lr = 1/32
check_rnet()
exit()
np.random.seed(196)
x = sm.Tensor()
gt_conf = sm.Tensor()
gt_box = sm.Tensor()
gt_landmark = sm.Tensor()
conf_mask = sm.Tensor()
box_mask = sm.Tensor()
landmark_mask = sm.Tensor()
conf, box, landmark = pnet(x)
conf_loss, box_loss, landmark_loss = pnet_loss(conf, box, landmark,
gt_conf, gt_box,
gt_landmark, conf_mask,
box_mask, landmark_mask)
# check same shape
epoch = 3
np.random.seed(196)
attrs = []
for _ in range(epoch):
#h = 224
#w = 224
h = np.random.randint(12, 224)
w = np.random.randint(12, 224)
subh = int(np.ceil((h - 2) / 2)) - 4
subw = int(np.ceil((w - 2) / 2)) - 4
dx = np.random.uniform(-1, 1, (32, h, w, 3))
#dconf = np.random.randn(32, subh, subw, 2)
dconf = np.random.choice([0, 1], (32, subh, subw, 1))
dconf = np.concatenate([dconf, 1-dconf], -1)
dbox = np.random.randn(32, subh, subw, 4)
dlandmark = np.random.randn(32, subh, subw, 10)
dconf_mask = np.random.randn(32, subh, subw)
dbox_mask = np.random.randn(32, subh, subw, 4)
dlandmark_mask = np.random.randn(32, subh, subw, 10)
#DEBUG
#dconf_mask *= 0
#dbox_mask *= 0
#dlandmark_mask *= 0
conf_, box_, landmark_, conf_loss_, box_loss_, landmark_loss_ = sm.forward(
[conf, box, landmark, conf_loss, box_loss, landmark_loss],
{x: dx, gt_conf: dconf, gt_box: dbox, gt_landmark: dlandmark,
conf_mask: np.stack([dconf_mask] * 2, -1), box_mask: dbox_mask, landmark_mask: dlandmark_mask})
sm.optimize([conf_loss, box_loss, landmark_loss], lr=lr)
attrs.append([conf_, box_, landmark_, conf_loss_, box_loss_, landmark_loss_])
with tf.device('/cpu:0'):
x1 = tf.placeholder(tf.float32)
gt_conf1 = tf.placeholder(tf.float32)
gt_box1 = tf.placeholder(tf.float32)
gt_landmark1 = tf.placeholder(tf.float32)
conf_mask1 = tf.placeholder(tf.float32)
box_mask1 = tf.placeholder(tf.float32)
landmark_mask1 = tf.placeholder(tf.float32)
conf1, box1, landmark1 = tf_pnet(x1)
conf_loss1, box_loss1, landmark_loss1 = tf_pnet_loss(conf1, box1, landmark1,
gt_conf1, gt_box1,
gt_landmark1, conf_mask1,
box_mask1, landmark_mask1)
loss = tf.reduce_sum(conf_loss1) + tf.reduce_sum(box_loss1) + tf.reduce_sum(landmark_loss1)
opt = tf.train.GradientDescentOptimizer(lr).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
np.random.seed(196)
epoch = 3
attrs1 = []
for _ in range(epoch):
#h = 224
#w = 224
h = np.random.randint(12, 224)
w = np.random.randint(12, 224)
subh = int(np.ceil((h - 2) / 2)) - 4
subw = int(np.ceil((w - 2) / 2)) - 4
dx = np.random.uniform(-1, 1, (32, h, w, 3))
dconf = np.random.choice([0, 1], (32, subh, subw, 1))
dconf = np.concatenate([dconf, 1-dconf], -1)
dbox = np.random.randn(32, subh, subw, 4)
dlandmark = np.random.randn(32, subh, subw, 10)
dconf_mask = np.random.randn(32, subh, subw)
dbox_mask = np.random.randn(32, subh, subw, 4)
dlandmark_mask = np.random.randn(32, subh, subw, 10)
#DEBUG
#dconf_mask *= 0
#dbox_mask *= 0
#dlandmark_mask *= 0
_, loss1_, conf1_, box1_, landmark1_, conf_loss1_, box_loss1_, landmark_loss1_ = sess.run(
[opt, loss, conf1, box1, landmark1, conf_loss1, box_loss1, landmark_loss1],
{x1: dx, gt_conf1: dconf, gt_box1: dbox, gt_landmark1: dlandmark,
conf_mask1: dconf_mask, box_mask1: dbox_mask, landmark_mask1: dlandmark_mask})
attrs1.append([loss1_, conf1_, box1_, landmark1_, conf_loss1_, box_loss1_, landmark_loss1_])
for (conf_, box_, landmark_, conf_loss_, box_loss_, landmark_loss_), \
(loss1_, conf1_, box1_, landmark1_, conf_loss1_, box_loss1_, landmark_loss1_) \
in zip(attrs, attrs1):
conf_error = maer(conf_, conf1_, conf1_)
box_error = maer(box_, box1_, box1_)
landmark_error = maer(landmark_, landmark1_, landmark1_)
conf_loss_error = maer(np.sum(conf_loss_, -1), conf_loss1_, conf_loss1_)
box_loss_error = maer(box_loss_, box_loss1_, box_loss1_)
landmark_loss_error = maer(landmark_loss_, landmark_loss1_, landmark_loss1_)
print('loss1_:', loss1_)
print('loss_:', np.sum(conf_loss_) + np.sum(box_loss_) + np.sum(landmark_loss_))
print('conf_error:', conf_error)
print('box_error:', box_error)
print('landmark_error:', landmark_error)
print('conf_loss_error:', conf_loss_error)
print('box_loss_error:', box_loss_error)
print('landmark_loss_error:', landmark_loss_error)
epoch = 100
h = 224
w = 224
subh = int(np.ceil((h - 2) / 2)) - 4
subw = int(np.ceil((w - 2) / 2)) - 4
dx = np.random.uniform(-1, 1, (32, h, w, 3))
dconf = np.random.choice([0, 1], (32, subh, subw, 1))
dconf = np.concatenate([dconf, 1-dconf], -1)
dbox = np.random.randn(32, subh, subw, 4)
dlandmark = np.random.randn(32, subh, subw, 10)
dconf_mask = np.random.randn(32, subh, subw)
dbox_mask = np.random.randn(32, subh, subw, 4)
dlandmark_mask = np.random.randn(32, subh, subw, 10)
# warmup
for _ in range(10):
sm.forward(
[],
{x: dx, gt_conf: dconf, gt_box: dbox, gt_landmark: dlandmark,
conf_mask: np.stack([dconf_mask] * 2, -1), box_mask: dbox_mask, landmark_mask: dlandmark_mask})
sm.optimize([conf_loss, box_loss, landmark_loss], lr=lr)
t1 = time.time()
for _ in range(epoch):
sm.forward(
[],
{x: dx, gt_conf: dconf, gt_box: dbox, gt_landmark: dlandmark,
conf_mask: np.stack([dconf_mask] * 2, -1), box_mask: dbox_mask, landmark_mask: dlandmark_mask})
sm.optimize([conf_loss, box_loss, landmark_loss], lr=lr)
t2 = time.time()
print('sm train time: {}s'.format((t2 - t1) / epoch))
"""# warmup
for _ in range(10):
sess.run(
[opt],
{x1: dx, gt_conf1: dconf, gt_box1: dbox, gt_landmark1: dlandmark,
conf_mask1: dconf_mask, box_mask1: dbox_mask, landmark_mask1: dlandmark_mask})
t1 = time.time()
for _ in range(epoch):
sess.run(
[opt],
{x1: dx, gt_conf1: dconf, gt_box1: dbox, gt_landmark1: dlandmark,
conf_mask1: dconf_mask, box_mask1: dbox_mask, landmark_mask1: dlandmark_mask})
t2 = time.time()
print('tf train time: {}s'.format((t2 - t1) / epoch))"""
| 40.907631 | 108 | 0.564795 |
affa772a3c0c5efd71d9a56c728d473461a0fe6a | 25 | py | Python | bindings/python/cobble/__init__.py | charliebruce/cobble | 92ed485681720efba050e037345dea1c3c031f88 | ["MIT"] | null | null | null | bindings/python/cobble/__init__.py | charliebruce/cobble | 92ed485681720efba050e037345dea1c3c031f88 | ["MIT"] | 5 | 2021-11-12T17:23:58.000Z | 2022-03-29T16:38:45.000Z | bindings/python/cobble/__init__.py | charliebruce/cobble | 92ed485681720efba050e037345dea1c3c031f88 | ["MIT"] | null | null | null |
from cobble import cobble
| 25 | 25 | 0.88 |
89c43aebbfd769e864c55823a6a77d2287a982ee | 124,475 | py | Python | python/paddle/fluid/dygraph/nn.py | javakian/Paddle | 10018f1561cb8f75f8df982dcf2217e50cee2647 | ["Apache-2.0"] | null | null | null | python/paddle/fluid/dygraph/nn.py | javakian/Paddle | 10018f1561cb8f75f8df982dcf2217e50cee2647 | ["Apache-2.0"] | null | null | null | python/paddle/fluid/dygraph/nn.py | javakian/Paddle | 10018f1561cb8f75f8df982dcf2217e50cee2647 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from six.moves import reduce
from .. import core
from ..layers import utils
from . import layers
from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter
from ..param_attr import ParamAttr
from ..initializer import Normal, Constant, NumpyArrayInitializer
import numpy as np
import logging
__all__ = [
'Conv2D', 'Conv3D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding', 'GRUUnit',
'LayerNorm', 'NCE', 'PRelu', 'BilinearTensorProduct', 'Conv2DTranspose',
'Conv3DTranspose', 'GroupNorm', 'SpectralNorm', 'TreeConv'
]
class Conv2D(layers.Layer):
"""
This interface is used to construct a callable object of the ``Conv2D`` class.
For more details, refer to code examples.
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
    Output are in NCHW format, where N is batch size, C is the number of channels
    of the feature map, H is the height of the feature map, and W is the width of the feature map.
Filter's shape is [MCHW] , where M is the number of output feature map,
C is the number of input feature map, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input feature map divided by the groups.
Please refer to UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
    for more details.
If bias attribution and activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \\sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Parameters:
name_scope(str): The name for this class.
        num_filters(int): The number of filters. It is the same as the number of
            output feature maps.
filter_size (int or tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: 1.
padding (int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: 0.
dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups (int, optional): The groups number of the Conv2d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filter of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Raises:
ValueError: if ``use_cudnn`` is not a bool value.
Examples:
.. code-block:: python
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D
import numpy as np
data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
with fluid.dygraph.guard():
conv2d = Conv2D("conv2d", 2, 3)
data = to_variable(data)
conv = conv2d(data)
"""
def __init__(self,
name_scope,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
assert param_attr is not False, "param_attr should not be False here."
super(Conv2D, self).__init__(name_scope, dtype)
self._groups = groups
self._stride = utils.convert_to_list(stride, 2, 'stride')
self._padding = utils.convert_to_list(padding, 2, 'padding')
self._dilation = utils.convert_to_list(dilation, 2, 'dilation')
self._act = act
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_cudnn = use_cudnn
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
# if (self._num_channels == self._groups and
# num_filters % self._num_channels == 0 and not self._use_cudnn):
# self._l_type = 'depthwise_conv2d'
# else:
# TODO(jiabin): recover the usage of depthwise_conv2d when it's
# kernel fixed https://github.com/PaddlePaddle/Paddle/issues/17275
self._l_type = 'conv2d'
def _build_once(self, input):
self._num_channels = input.shape[1]
if self._groups is None:
num_filter_channels = self._num_channels
else:
if self._num_channels % self._groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = self._num_channels // self._groups
filter_size = utils.convert_to_list(self._filter_size, 2, 'filter_size')
filter_shape = [self._num_filters, int(num_filter_channels)
] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[
1] * self._num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
self._filter_param = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
self._bias_param = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
@property
def weight(self):
return self._filter_param
@weight.setter
def weight(self, value):
self._filter_param = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type=self._l_type,
inputs={
'Input': input,
'Filter': self._filter_param,
},
outputs={"Output": pre_bias},
attrs={
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn,
'use_mkldnn': False,
})
if self._bias_param is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self._bias_param]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(pre_act, act=self._act)
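# Worked instance of the Conv2D output-shape formula documented above: with H_in = W_in = 32,
# a 3x3 filter, padding = 0, stride = 1 and dilation = 1,
# H_out = (32 + 2*0 - (1*(3 - 1) + 1)) // 1 + 1 = 30, so a 32x32 input yields a 30x30
# feature map for each of the num_filters output channels.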
class Conv3D(layers.Layer):
"""
    **Convolution3D Layer**
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are multidimensional tensors with a shape of
:math:`[N, C, D, H, W]` . Where N is batch size, C is the number of
channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convolution3D is similar to Convolution2D
but adds one dimension(depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
Parameters:
name_scope(str) : The name for this class.
        num_filters(int): The number of filters. It is the same as the number of output image channels.
filter_size (int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
Otherwise, the filter will be a square, filter_size_depth = filter_size_height
= filter_size_width = filter_size.
stride (int|tuple, optional): The stride size. If stride is a tuple, it must
contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
stride_D = stride_H = stride_W = stride. The default value is 1.
padding (int|tuple, optional): The padding size. If padding is a tuple, it must
contain three integers, (padding_D, padding_H, padding_W). Otherwise, the
padding_D = padding_H = padding_W = padding. The default value is 0.
dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups (int, optional): The groups number of the Conv3d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. The default value is 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as param_attr. If it is set to None, the parameter
is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
:math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. The default value is True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
The default value is None.
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
None.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')
conv3d = fluid.dygraph.nn.Conv3D(
'Conv3D', num_filters=2, filter_size=3, act="relu")
ret = conv3d(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
name_scope,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None):
assert param_attr is not False, "param_attr should not be False here."
super(Conv3D, self).__init__(name_scope)
self._groups = groups
self._stride = utils.convert_to_list(stride, 3, 'stride')
self._padding = utils.convert_to_list(padding, 3, 'padding')
self._dilation = utils.convert_to_list(dilation, 3, 'dilation')
self._act = act
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_cudnn = use_cudnn
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
self._bias_attr = bias_attr
def _build_once(self, input):
num_channels = input.shape[1]
self._dtype = self._helper.input_dtype(input)
if self._groups is None:
num_filter_channels = num_channels
else:
if num_channels % self._groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels // self._groups
filter_size = utils.convert_to_list(self._filter_size, 3, 'filter_size')
filter_shape = [self._num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
2] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
self._filter_param = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
self._bias_param = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
@property
def weight(self):
return self._filter_param
@weight.setter
def weight(self, value):
self._filter_param = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='conv3d',
inputs={
'Input': input,
'Filter': self._filter_param,
},
outputs={"Output": pre_bias},
attrs={
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn,
'use_mkldnn': False
})
if self._bias_param is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self._bias_param]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
return self._helper.append_activation(pre_act, act=self._act)
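# Numeric sketch of Conv3D's default weight initializer (see _get_default_param_initializer
# above): for a 3x3x3 filter over num_channels = 4, filter_elem_num = 3*3*3*4 = 108 and
# std = (2.0 / 108) ** 0.5, roughly 0.136, i.e. a He-style Normal(0, sqrt(2 / fan_in)) init.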
class Conv3DTranspose(layers.Layer):
"""
    **Convolution3D transpose layer**
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
    is the width of the feature. Parameters(dilations, strides, paddings) are
    three elements. These three elements represent depth, height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
           H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
           W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ] \\\\
**Note**:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
when stride > 1, conv3d maps multiple input shape to the same output shape,
so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
          If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} =
          H^\prime_{out}, W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
size must between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
conv3d_transpose can compute the kernel size automatically.
Parameters:
name_scope(str) : The name for this class.
        num_filters(int): The number of filters. It is the same as the number of
            output image channels.
output_size(int|tuple, optional): The output image size. If output size is a
tuple, it must contain three integers, (image_depth, image_height, image_width). This
parameter only works when filter_size is None. If output_size and filter_size are
            specified at the same time, they should follow the formula above. The default value is None.
            output_size and filter_size should not be None at the same time.
filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
Otherwise, the filter will be a square. None if use output size to
calculate filter_size. The default value is None.
padding(int|tuple, optional): The padding size. The padding argument effectively
adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `'NCDHW'`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NDHWC'`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
The default value is 0.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
The default value is 1.
dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
The default value is 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. The default value is True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
The default value is None.
name(str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`.
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
None.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')
conv3dTranspose = fluid.dygraph.nn.Conv3DTranspose(
'Conv3DTranspose',
num_filters=12,
filter_size=12,
use_cudnn=False)
ret = conv3dTranspose(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
name_scope,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None):
super(Conv3DTranspose, self).__init__(name_scope)
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
self._padding = utils.convert_to_list(padding, 3, 'padding')
self._stride = utils.convert_to_list(stride, 3, 'stride')
self._dilation = utils.convert_to_list(dilation, 3, 'dilation')
self._param_attr = param_attr
self._filter_size = filter_size
self._output_size = output_size
self._groups = 1 if groups is None else groups
self._num_filters = num_filters
self._use_cudnn = use_cudnn
self._bias_attr = bias_attr
self._act = act
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
self._input_channel = input.shape[1]
if self._filter_size is None:
if self._output_size is None:
raise ValueError(
"output_size must be set when filter_size is None")
if isinstance(self._output_size, int):
self._output_size = [self._output_size, self._output_size]
d_in = input.shape[2]
h_in = input.shape[3]
w_in = input.shape[4]
filter_size_d = (self._output_size[0] -
(d_in - 1) * self._stride[0] + 2 * self._padding[0]
- 1) // self._dilation[0] + 1
filter_size_h = (self._output_size[1] -
(h_in - 1) * self._stride[1] + 2 * self._padding[1]
- 1) // self._dilation[1] + 1
filter_size_w = (self._output_size[2] -
(w_in - 1) * self._stride[2] + 2 * self._padding[2]
- 1) // self._dilation[2] + 1
self._filter_size = [filter_size_d, filter_size_h, filter_size_w]
else:
self._filter_size = utils.convert_to_list(
self._filter_size, 3, 'conv3d_transpose.filter_size')
filter_shape = [
self._input_channel, self._num_filters // self._groups
] + self._filter_size
self._img_filter = self.create_parameter(
dtype=self._dtype, shape=filter_shape, attr=self._param_attr)
if self._bias_attr:
self._bias_param = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
@property
def weight(self):
return self._img_filter
@weight.setter
def weight(self, value):
self._img_filter = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type="conv3d_transpose",
inputs={'Input': [input],
'Filter': [self._img_filter]},
outputs={'Output': pre_bias},
attrs={
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn
})
if self._bias_attr:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self._bias_param]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
# Currently, we don't support inplace in imperative mode
return self._helper.append_activation(pre_act, act=self._act)
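# Worked example of the transposed-convolution shape formula in the Conv3DTranspose docstring:
# with D_in = 12, stride = 2, padding = 0, dilation = 1 and a 4x4x4 filter,
# D'_out = (12 - 1) * 2 - 2*0 + 1*(4 - 1) + 1 = 26, and when output_size is given the final
# D_out may lie anywhere in the documented interval [26, 26 + strides[0]].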
class Pool2D(layers.Layer):
"""
This interface is used to construct a callable object of the ``Pool2D`` class.
For more details, refer to code examples.
The pooling2d operation calculates the output based on the input, pool_type and pool_size, pool_stride,
    pool_padding parameters. Input and output are in NCHW format, where N is batch size, C is the number of channels,
    H is the height of the feature map, and W is the width of the feature map.
    Parameters (ksize, strides, paddings) are two elements, which represent height and width, respectively.
The input(X) size and output(Out) size may be different.
Example:
- Input:
Input shape: :math:`(N, C, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C, H_{out}, W_{out})`
If ``ceil_mode`` = False:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
If ``ceil_mode`` = True:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
If ``exclusive`` = False:
.. math::
hstart &= i * strides[0] - paddings[0] \\\\
hend &= hstart + ksize[0] \\\\
wstart &= j * strides[1] - paddings[1] \\\\
wend &= wstart + ksize[1] \\\\
Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
If ``exclusive`` = True:
.. math::
hstart &= max(0, i * strides[0] - paddings[0])\\\\
hend &= min(H, hstart + ksize[0]) \\\\
wstart &= max(0, j * strides[1] - paddings[1]) \\\\
wend & = min(W, wstart + ksize[1]) \\\\
Output(i ,j) & = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
Parameters:
name_scope(str) : The name of this class.
pool_size (int or list or tuple, optional): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
Otherwise, the pool kernel size will be a square of an int. Default: -1.
pool_type(str, optional) : The pooling type, can be "max" for max-pooling and "avg" for average-pooling.
Default: max.
pool_stride (int or list or tuple, optional): The pool stride size. If pool stride size is a tuple or list,
it must contain two integers, (pool_stride_Height, pool_stride_Width). Otherwise,
the pool stride size will be a square of an int. Default: 1.
pool_padding (int or list or tuple, optional): The padding size for pooling operation.
If ``pool_padding`` is a tuple,
it must contain two integers, (pool_padding_on_Height, pool_padding_on_Width).
Otherwise, the padding size for pooling operation will be a square of an int. Default: 0.
global_pooling (bool, optional): Whether to use the global pooling. If global_pooling = true,
kernel size and paddings will be ignored. Default: False.
use_cudnn (bool, optional): Only used in cudnn kernel, need install cudnn. Default: True.
ceil_mode (bool, optional): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. Default: False.
exclusive (bool, optional): Whether to exclude padding points in average pooling mode. Default: True.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Returns:
None
Raises:
ValueError: If 'pool_type' is not "max" nor "avg"
ValueError: If 'global_pooling' is False and 'pool_size' is -1
ValueError: If 'use_cudnn' is not a bool value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
with fluid.dygraph.guard():
               data = np.random.random((3, 32, 32, 5)).astype('float32')
               pool2d = fluid.dygraph.Pool2D("pool2d", pool_size=2,
pool_type='max',
pool_stride=1,
global_pooling=False)
pool2d_res = pool2d(to_variable(data))
"""
def __init__(self,
name_scope,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
exclusive=True,
dtype=core.VarDesc.VarType.FP32):
if pool_type not in ["max", "avg"]:
            raise ValueError(
                "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
                str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When the global_pooling is False, pool_size must be passed "
"and be a valid value. Received pool_size: " + str(pool_size))
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
super(Pool2D, self).__init__(name_scope, dtype=dtype)
self._pool_type = pool_type
self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
self._pool_padding = utils.convert_to_list(pool_padding, 2,
'pool_padding')
self._pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')
self._global_pooling = global_pooling
self._use_cudnn = use_cudnn
self._ceil_mode = ceil_mode
self._exclusive = exclusive
self._l_type = 'pool2d'
def forward(self, input):
pool_out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type=self._l_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs={
"pooling_type": self._pool_type,
"ksize": self._pool_size,
"global_pooling": self._global_pooling,
"strides": self._pool_stride,
"paddings": self._pool_padding,
"use_cudnn": self._use_cudnn,
"ceil_mode": self._ceil_mode,
"use_mkldnn": False,
"exclusive": self._exclusive,
})
return pool_out
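# Numeric sketch of the Pool2D shape formula above: for H_in = 32, ksize = 2, padding = 0 and
# stride = 1 with ceil_mode = False, H_out = (32 - 2 + 2*0) // 1 + 1 = 31; the ceil_mode = True
# variant adds strides - 1 to the numerator and so only differs when that division would
# otherwise truncate.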
class FC(layers.Layer):
"""
This interface is used to construct a callable object of the ``FC`` class.
For more details, refer to code examples.
It creates a fully connected layer in the network. It can take
one or multiple ``Tensor`` as its inputs. It creates a Variable called weights for each input tensor,
which represents a fully connected weight matrix from each input unit to
each output unit. The fully connected layer multiplies each input tensor
with its corresponding weight to produce an output Tensor with shape [N, `size`],
where N is batch size. If multiple input tensors are given, the results of
multiple output tensors with shape [N, `size`] will be summed up. If ``bias_attr``
is not None, a bias variable will be created and added to the output.
Finally, if ``act`` is not None, it will be applied to the output as well.
When the input is single ``Tensor`` :
.. math::
Out = Act({XW + b})
When the input are multiple ``Tensor`` :
.. math::
Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})
In the above equation:
    * :math:`N`: Number of the input. N equals len(input) if input is a list of ``Tensor`` .
* :math:`X_i`: The i-th input ``Tensor`` .
* :math:`W_i`: The i-th weights matrix corresponding i-th input tensor.
* :math:`b`: The bias parameter created by this layer (if needed).
* :math:`Act`: The activation function.
* :math:`Out`: The output ``Tensor`` .
See below for an example.
.. code-block:: text
Given:
data_1.data = [[[0.1, 0.2]]]
data_1.shape = (1, 1, 2) # 1 is batch_size
data_2.data = [[[0.1, 0.2, 0.3]]]
data_2.shape = (1, 1, 3) # 1 is batch_size
fc = FC("fc", 2, num_flatten_dims=2)
out = fc(input=[data_1, data_2])
Then:
out.data = [[[0.182996 -0.474117]]]
out.shape = (1, 1, 2)
Parameters:
name_scope(str): The name of this class.
size(int): The number of output units in this layer.
num_flatten_dims (int, optional): The fc layer can accept an input tensor with more than
two dimensions. If this happens, the multi-dimension tensor will first be flattened
into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input
tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1)
dimensions will be flatten to form the first dimension of the final matrix (height of
the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to
form the second dimension of the final matrix (width of the matrix). For example, suppose
`X` is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3.
Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1
param_attr (ParamAttr or list of ParamAttr, optional): The parameter attribute for learnable
weights(Parameter) of this layer. Default: None.
bias_attr (ParamAttr or list of ParamAttr, optional): The attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
act (str, optional): Activation to be applied to the output of this layer. Default: None.
is_test(bool, optional): A flag indicating whether execution is in test phase. Default: False.
dtype(str, optional): Dtype used for weight, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (list of Parameter): the learnable weights of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
from paddle.fluid.dygraph import FC
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
fc = FC("fc", 64, num_flatten_dims=2)
data = to_variable(data)
conv = fc(data)
"""
def __init__(self,
name_scope,
size,
num_flatten_dims=1,
param_attr=None,
bias_attr=None,
act=None,
is_test=False,
dtype="float32"):
super(FC, self).__init__(name_scope, dtype)
self._size = size
self._num_flatten_dims = num_flatten_dims
self._dtype = dtype
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self.__w = list()
def _build_once(self, input):
i = 0
for inp, param in self._helper.iter_inputs_and_params(input,
self._param_attr):
input_shape = inp.shape
param_shape = [
reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:],
1)
] + [self._size]
self.__w.append(
self.add_parameter(
'_w%d' % i,
self.create_parameter(
attr=param,
shape=param_shape,
dtype=self._dtype,
is_bias=False)))
i += 1
size = list([self._size])
self._b = self.create_parameter(
attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True)
# TODO(songyouwei): We should remove _w property
@property
def _w(self, i=0):
return self.__w[i]
@_w.setter
def _w(self, value, i=0):
assert isinstance(self.__w[i], Variable)
self.__w[i].set_value(value)
@property
def weight(self):
if len(self.__w) > 1:
return self.__w
else:
return self.__w[0]
@weight.setter
def weight(self, value):
if len(self.__w) == 1:
self.__w[0] = value
@property
def bias(self):
return self._b
@bias.setter
def bias(self, value):
self._b = value
def forward(self, input):
mul_results = list()
i = 0
for inp, param in self._helper.iter_inputs_and_params(input,
self._param_attr):
tmp = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="mul",
inputs={"X": inp,
"Y": self.__w[i]},
outputs={"Out": tmp},
attrs={
"x_num_col_dims": self._num_flatten_dims,
"y_num_col_dims": 1
})
i += 1
mul_results.append(tmp)
if len(mul_results) == 1:
pre_bias = mul_results[0]
else:
pre_bias = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="sum",
inputs={"X": mul_results},
outputs={"Out": pre_bias},
attrs={"use_mkldnn": False})
if self._b:
pre_activation = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self._b]},
outputs={'Out': [pre_activation]},
attrs={'axis': self._num_flatten_dims})
else:
pre_activation = pre_bias
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(pre_activation, act=self._act)
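# Worked example of FC's num_flatten_dims behaviour described above: an input of shape
# [2, 3, 4, 5, 6] with num_flatten_dims = 3 is multiplied as a [2*3*4, 5*6] = [24, 30] matrix,
# so with size = 64 the created weight has shape [30, 64] and the output, reshaped back,
# has shape [2, 3, 4, 64].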
class BatchNorm(layers.Layer):
"""
This interface is used to construct a callable object of the ``BatchNorm`` class.
For more details, refer to code examples.
It implements the function of the Batch Normalization Layer and can be used
as a normalizer function for conv2d and fully connected operations.
The data is normalized by the mean and variance of the channel based on the current batch data.
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.
When use_global_stats = False, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are the statistics of one mini-batch.
Calculated as follows:
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
- :math:`x` : mini-batch data
- :math:`m` : the size of the mini-batch data
When use_global_stats = True, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
They are global or running statistics (moving_mean and moving_variance). It usually got from the
pre-trained model. Calculated as follows:
.. math::
moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\
moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\
The normalization function formula is as follows:
.. math::
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
- :math:`\\epsilon` : add a smaller value to the variance to prevent division by zero
- :math:`\\gamma` : trainable proportional parameter
- :math:`\\beta` : trainable deviation parameter
Parameters:
name_scope(str): The name of this class.
num_channels(int): Indicate the number of channels of the input ``Tensor``.
act(str, optional): Activation to be applied to the output of batch normalizaiton. Default: None.
is_test (bool, optional): A flag indicating whether it is in test phrase or not. Default: False.
momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
param_attr(ParamAttr, optional): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
dtype(str, optional): Indicate the data type of the input ``Tensor``,
which can be float32 or float64. Default: float32.
data_layout(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
in_place(bool, optional): Make the input and output of batch norm reuse memory. Default: False.
moving_mean_name(str, optional): The name of moving_mean which store the global Mean. Default: None.
moving_variance_name(str, optional): The name of the moving_variance which store the global Variance. Default: None.
do_model_average_for_mean_and_var(bool, optional): Whether parameter mean and variance should do model
average when model average is enabled. Default: True.
use_global_stats(bool, optional): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period. Default: False.
trainable_statistics(bool, optional): Whether to calculate mean and var in eval mode. In eval mode, when
setting trainable_statistics True, mean and variance will be calculated by current batch statistics.
Default: False.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
batch_norm = fluid.BatchNorm("batch_norm", 10)
hidden1 = batch_norm(x)
"""
def __init__(self,
name_scope,
num_channels,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
dtype='float32',
data_layout='NCHW',
in_place=False,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False,
trainable_statistics=False):
super(BatchNorm, self).__init__(name_scope, dtype)
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
if dtype == "float16":
self._dtype = "float32"
else:
self._dtype = dtype
param_shape = [num_channels]
# create parameter
self._scale = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
if use_global_stats and self._param_attr.learning_rate == 0.:
self._scale.stop_gradient = True
self._bias = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
if use_global_stats and self._param_attr.learning_rate == 0.:
self._bias.stop_gradient = True
self._mean = self.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=self._dtype)
self._mean.stop_gradient = True
self._variance = self.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=self._dtype)
self._variance.stop_gradient = True
self._in_place = in_place
self._data_layout = data_layout
self._momentum = momentum
self._epsilon = epsilon
self._is_test = is_test
self._fuse_with_relu = False
self._use_global_stats = use_global_stats
self._trainable_statistics = trainable_statistics
def _build_once(self, input):
pass
def forward(self, input):
# create output
# mean and mean_out share the same memory
mean_out = self._mean
# variance and variance out share the same memory
variance_out = self._variance
saved_mean = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
batch_norm_out = input if self._in_place else self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="batch_norm",
inputs={
"X": input,
"Scale": self._scale,
"Bias": self._bias,
"Mean": self._mean,
"Variance": self._variance
},
outputs={
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": variance_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance
},
attrs={
"momentum": self._momentum,
"epsilon": self._epsilon,
"is_test": self._is_test,
"data_layout": self._data_layout,
"use_mkldnn": False,
"fuse_with_relu": self._fuse_with_relu,
"use_global_stats": self._use_global_stats,
"trainable_statistics": self._trainable_statistics
})
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(batch_norm_out, self._act)
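# Tiny numeric illustration of the batch-norm equations above (use_global_stats = False):
# for a single-channel mini-batch x = [1, 2, 3], mu = 2 and sigma^2 = 2/3, so with a negligible
# epsilon the normalized values are roughly [-1.22, 0.0, 1.22], which are then scaled by the
# learnable gamma (initialized to 1) and shifted by beta (initialized to 0).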
class Embedding(layers.Layer):
"""
**Embedding Layer**
This interface is used to construct a callable object of the ``Embedding`` class.
For specific usage, refer to code examples. It implements the function of the Embedding Layer.
This layer is used to lookup embeddings vector of ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the
input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
The shape of output Tensor is generated by appending an emb_size dimension to the
last dimension of the input Tensor shape.
    **Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
otherwise the program will throw an exception and exit.
.. code-block:: text
Case 1:
input is a Tensor. padding_idx = -1
            input.data = [[1, 3], [2, 4], [4, 127]]
input.shape = [3, 2]
Given size = [128, 16]
output is a Tensor:
out.shape = [3, 2, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]]] # padding data
        Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127
It will pad all-zero data when ids is 127.
Parameters:
name_scope(str): The name of this class.
size(tuple|list): The shape of the look up table parameter. It should have two elements which indicate the size
of the dictionary of embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
affects the performance of the backwards gradient update. It is recommended to set
            it to True because sparse update is faster. However, some optimizers do not support sparse update,
such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
:ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
:ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
            In these cases, is_sparse must be False. Default: False.
is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
in multi-machine distributed CPU training. Default: False.
padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
If set None, it makes no effect to output. Default: None.
param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
The local word vector needs to be transformed into numpy format, and the shape of local word
            vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
is used to load custom or pre-trained word vectors. See code example 2 for details.
dtype(np.dtype|core.VarDesc.VarType|str): It refers to the data type of output Tensor.
It must be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
Returns:
Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.dygraph.base as base
import numpy as np
# example 1
inp_word = np.array([[2, 3, 5], [4, 2, 1]]).astype('int64')
inp_word.shape # [2, 3]
dict_size = 20
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding(
name_scope='embedding',
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
static_rlt3 = emb(base.to_variable(inp_word))
static_rlt3.shape # [2, 3, 32]
# example 2: load custom or pre-trained word vectors
weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format
w_param_attrs = fluid.ParamAttr(
name="emb_weight",
learning_rate=0.5,
initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
trainable=True)
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding(
name_scope='embedding',
size=[128, 100],
param_attr= w_param_attrs,
is_sparse=False)
static_rlt3 = emb(base.to_variable(inp_word))
"""
def __init__(self,
name_scope,
size,
is_sparse=False,
is_distributed=False,
padding_idx=None,
param_attr=None,
dtype='float32'):
super(Embedding, self).__init__(name_scope, dtype)
self._size = size
self._is_sparse = is_sparse
self._is_distributed = is_distributed
self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
size[0] + padding_idx)
self._param_attr = param_attr
self._dtype = dtype
self._remote_prefetch = self._is_sparse and (not self._is_distributed)
if self._remote_prefetch:
assert self._is_sparse is True and self._is_distributed is False
self._w = self.create_parameter(
attr=self._param_attr,
shape=self._size,
dtype=self._dtype,
is_bias=False)
@property
def weight(self):
return self._w
@weight.setter
def weight(self, value):
self._w = value
def forward(self, input):
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='lookup_table_v2',
inputs={'Ids': input,
'W': self._w},
outputs={'Out': out},
attrs={
'is_sparse': self._is_sparse,
'is_distributed': self._is_distributed,
'remote_prefetch': self._remote_prefetch,
'padding_idx': self._padding_idx
})
return out
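# Minimal sketch of what the Embedding lookup above computes: with size = [128, 16] the layer
# owns a [128, 16] weight W, and for ids of shape [3, 2] the output is out[i, j] = W[ids[i, j]]
# with shape [3, 2, 16]; rows equal to padding_idx (127 in the docstring example) come back as
# all zeros and receive no gradient update.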
class LayerNorm(layers.Layer):
"""
This interface is used to construct a callable object of the ``LayerNorm`` class.
For more details, refer to code examples.
It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
The formula is as follows:
.. math::
\\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i
\\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}
y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)
- :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layer
- :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
- :math:`g`: the trainable scale parameter.
- :math:`b`: the trainable bias parameter.
Parameters:
name_scope(str): The name of this class.
scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
normalization. Default: True.
shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
normalization. Default: True.
begin_norm_axis(int, optional): The normalization will be performed along
dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
Default: 1.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as scale. The
:attr:`param_attr` is initialized as 1 if it is added. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
omitted. If :attr:`shift` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as bias. The
:attr:`bias_attr` is initialized as 0 if it is added. Default: None.
act(str, optional): Activation to be applied to the output of layer normalizaiton.
Default: None.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy
x = numpy.random.random((3, 32, 32)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
layerNorm = fluid.LayerNorm('LayerNorm', begin_norm_axis=1)
ret = layerNorm(x)
"""
def __init__(self,
name_scope,
scale=True,
shift=True,
begin_norm_axis=1,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None):
super(LayerNorm, self).__init__(name_scope)
self._scale = scale
self._shift = shift
self._begin_norm_axis = begin_norm_axis
self._epsilon = epsilon
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
input_shape = input.shape
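        # The learnable gain/bias cover all normalized dimensions flattened into a
        # single axis: for the docstring's (3, 32, 32) input with begin_norm_axis=1,
        # param_shape below becomes [32 * 32] = [1024].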
param_shape = [
reduce(lambda x, y: x * y, input_shape[self._begin_norm_axis:])
]
if self._scale:
self._scale_w = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
else:
if self._param_attr:
logging.warn("param_attr are only avaliable with scale is True")
if self._shift:
assert self._bias_attr is not False
self._bias_w = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
else:
if self._bias_attr:
logging.warn("bias_attr are only avaliable with shift is True")
def forward(self, input):
inputs = dict()
inputs['X'] = input
if self._scale:
inputs['Scale'] = self._scale_w
if self._shift:
inputs['Bias'] = self._bias_w
# create output
mean_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
variance_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
layer_norm_out = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="layer_norm",
inputs=inputs,
outputs={
"Y": layer_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={
"epsilon": self._epsilon,
"begin_norm_axis": self._begin_norm_axis
})
return self._helper.append_activation(layer_norm_out, act=self._act)
class GRUUnit(layers.Layer):
"""
**GRU unit layer**
It creates a callable object from GRUUnit class.
If origin_mode is True, then the equation of a gru step is from paper
`Learning Phrase Representations using RNN Encoder-Decoder for Statistical
Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_
.. math::
u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)
r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)
m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
If origin_mode is False, then the equation of a gru step is from paper
`Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
Modeling <https://arxiv.org/pdf/1412.3555.pdf>`_
.. math::
u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)
r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)
m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)
h_t & = dot((1-u_t), h_{t-1}) + dot(u_t, m_t)
The inputs of gru unit includes :math:`z_t`, :math:`h_{t-1}`. In terms
of the equation above, the :math:`z_t` is split into 3 parts -
:math:`xu_t`, :math:`xr_t` and :math:`xm_t`. This means that in order to
implement a full GRU unit operator for an input, a fully
connected layer has to be applied, such that :math:`z_t = W_{fc}x_t`.
The terms :math:`u_t` and :math:`r_t` represent the update and reset gates
    of the GRU cell. Unlike LSTM, GRU has one fewer gate. However, there is
an intermediate candidate hidden output, which is denoted by :math:`m_t`.
This layer has three outputs :math:`h_t`, :math:`dot(r_t, h_{t-1})`
and concatenation of :math:`u_t`, :math:`r_t` and :math:`m_t`.
Parameters:
name_scope(str): The name of this class.
size (int): The input dimension value.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
hidden-hidden weight matrix.
**Note**:
                1. The shape of the weight matrix is :math:`[D, 3*D]`, where D is the hidden size.
2. All elements in the weight matrix can be divided into two parts. The first
part are weights of the update gate and reset gate with shape :math:`[D, 2*D]`,
and the second part are weights for candidate hidden state with shape :math:`[D, D]`.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default
value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias
            of GRU. Note that the bias with :math:`[1, 3*D]` concatenates
the bias in the update gate, reset gate and candidate calculations.
If it is set to False, no bias will be applied to the update gate,
reset gate and candidate calculations. If it is set to None or one
attribute of ParamAttr, gru_unit will create ParamAttr as
bias_attr. If the Initializer of the bias_attr is not set, the bias
is initialized zero. The default value is None.
activation (str): The activation type for cell (actNode).
The default value is 'tanh'.
gate_activation (str): The activation type for gates (actGate).
The default value is 'sigmoid'.
dtype(str): The dtype of the layers. The data type can be set as
'float32', 'float64'. The default value is 'float32'.
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
tuple: The hidden value, reset-hidden value and gate values. The hidden value
is a 2-D tensor with shape :math:`[T, D]` . The reset-hidden value is a
2-D tensor with shape :math:`[T, D]` . The gate value is a 2-D tensor with
shape :math:`[T, 3*D]`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.dygraph.base as base
import numpy
lod = [[2, 4, 3]]
D = 5
T = sum(lod[0])
input = numpy.random.rand(T, 3 * D).astype('float32')
hidden_input = numpy.random.rand(T, D).astype('float32')
with fluid.dygraph.guard():
x = numpy.random.random((3, 32, 32)).astype('float32')
gru = fluid.dygraph.GRUUnit('gru', size=D * 3)
dy_ret = gru(
base.to_variable(input), base.to_variable(hidden_input))
"""
def __init__(self,
name_scope,
size,
param_attr=None,
bias_attr=None,
activation='tanh',
gate_activation='sigmoid',
origin_mode=False,
dtype='float32'):
super(GRUUnit, self).__init__(name_scope, dtype)
self._bias_attr = bias_attr
activation_dict = dict(
identity=0,
sigmoid=1,
tanh=2,
relu=3, )
self.activation = activation_dict[activation]
self.gate_activation = activation_dict[gate_activation]
self._dtype = dtype
size = size // 3
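        # The `size` argument is the concatenated gate width (3 * D), matching the
        # docstring example `size=D * 3`; the hidden-hidden weight created below is
        # therefore [D, 3 * D].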
# create weight
self._weight = self.create_parameter(
attr=param_attr, shape=[size, 3 * size], dtype=dtype)
# create bias
bias_size = [1, 3 * size]
self._bias_size = bias_size
self._bias = self.create_parameter(
attr=bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
@property
def weight(self):
return self._weight
@weight.setter
def weight(self, value):
self._weight = value
@property
def bias(self):
return self._bias
@bias.setter
def bias(self, value):
self._bias = value
def forward(self, input, hidden):
inputs = {'Input': input, 'HiddenPrev': hidden, 'Weight': self._weight}
if self._bias:
inputs['Bias'] = self._bias
gate = self._helper.create_variable_for_type_inference(self._dtype)
reset_hidden_pre = self._helper.create_variable_for_type_inference(
self._dtype)
updated_hidden = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type='gru_unit',
inputs=inputs,
outputs={
'Gate': gate,
'ResetHiddenPrev': reset_hidden_pre,
'Hidden': updated_hidden,
},
attrs={
'activation': self.activation,
'gate_activation': self.gate_activation,
})
return updated_hidden, reset_hidden_pre, gate
class NCE(layers.Layer):
"""
This interface is used to construct a callable object of the ``NCE`` class.
For more details, refer to code examples.
It implements the function of the ``NCE`` loss function.
By default this function uses a uniform distribution for sampling, and it
    computes and returns the noise-contrastive estimation training loss. See
`Noise-contrastive estimation: A new estimation principle for unnormalized statistical models <http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf>`_ .
Parameters:
name_scope(str): The name of this class.
num_total_classes (int): Total number of classes in all samples
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of nce. If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of nce.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
num_neg_samples (int, optional): The number of negative classes. The default value is 10.
        sampler (str, optional): The sampler used to sample classes from negative classes.
It can be 'uniform', 'log_uniform' or 'custom_dist'.
default: 'uniform'.
custom_dist (float[], optional): A float[] with size=num_total_classes.
It is used when sampler is set to 'custom_dist'.
custom_dist[i] is the probability of i-th class to be sampled.
Default: None.
seed (int, optional): The seed used in sampler. Default: 0.
is_sparse(bool, optional): The flag indicating whether to use sparse update. If is_sparse is True, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
window_size = 5
dict_size = 20
label_word = int(window_size // 2) + 1
inp_word = np.array([[1], [2], [3], [4], [5]]).astype('int64')
nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')
with fluid.dygraph.guard():
words = []
for i in range(window_size):
words.append(fluid.dygraph.base.to_variable(inp_word[i]))
emb = fluid.Embedding(
'embedding',
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
embs3 = []
for i in range(window_size):
if i == label_word:
continue
emb_rlt = emb(words[i])
embs3.append(emb_rlt)
embs3 = fluid.layers.concat(input=embs3, axis=1)
nce = fluid.NCE('nce',
num_total_classes=dict_size,
num_neg_samples=2,
sampler="custom_dist",
custom_dist=nid_freq_arr.tolist(),
seed=1,
param_attr='nce.w',
bias_attr='nce.b')
wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
nce_loss3 = nce(embs3, wl)
"""
def __init__(self,
name_scope,
num_total_classes,
sample_weight=None,
param_attr=None,
bias_attr=None,
num_neg_samples=None,
sampler="uniform",
custom_dist=None,
seed=0,
is_sparse=False):
super(NCE, self).__init__(name_scope)
self._param_attr = param_attr
self._bias_attr = bias_attr
self._num_total_classes = num_total_classes
self._inputs = dict()
self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []
if sampler == "uniform":
sampler = 0
elif sampler == "log_uniform":
sampler = 1
elif sampler == "custom_dist":
assert custom_dist is not None
# assert isinstance(custom_dist, Variable)
custom_dist_len = len(custom_dist)
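            # Build Walker's alias tables for O(1) sampling from custom_dist: every
            # class i keeps a residual probability alias_probs_[i] and a fallback
            # class alias_[i] (-1 means "no fallback"). Classes whose scaled
            # probability exceeds 1 ("bigs") donate their excess mass to classes
            # below 1 ("littles") until every bucket sums to exactly 1.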
alias_probs_ = [0] * custom_dist_len
alias_ = [0] * custom_dist_len
bigs = []
littles = []
for i in range(custom_dist_len):
normal_prob = custom_dist[i] * custom_dist_len
if normal_prob - 1.0 > 0:
bigs.append((i, normal_prob))
elif 1.0 - normal_prob > 0:
littles.append((i, normal_prob))
else:
alias_probs_[i] = normal_prob
alias_[i] = -1
while len(bigs) and len(littles):
big = bigs.pop(0)
little = littles.pop(0)
big_idx = big[0]
big_prob = big[1]
alias_probs_[little[0]] = little[1]
alias_[little[0]] = big_idx
big_left = big[1] + little[1] - 1
if big_left - 1.0 > 0:
bigs.append((big_idx, big_left))
elif 1.0 - big_left > 0:
littles.append((big_idx, big_left))
else:
alias_probs_[big_idx] = big_left
alias_[big_idx] = -1
if len(bigs):
big = bigs.pop(0)
alias_probs_[big[0]] = 1.0
alias_[big[0]] = -1
if len(littles):
little = littles.pop(0)
alias_probs_[little[0]] = 1.0
alias_[little[0]] = -1
def _init_by_numpy_array(numpy_array):
ret = self.create_parameter(
attr=ParamAttr(),
shape=numpy_array.shape,
dtype=numpy_array.dtype,
default_initializer=NumpyArrayInitializer(numpy_array))
ret.stop_gradient = True
return ret
self._inputs['CustomDistProbs'] = _init_by_numpy_array(
np.array(custom_dist).astype('float32'))
self._inputs['CustomDistAlias'] = _init_by_numpy_array(
np.array(alias_).astype('int32'))
self._inputs['CustomDistAliasProbs'] = _init_by_numpy_array(
np.array(alias_probs_).astype('float32'))
sampler = 2
else:
raise Exception("Unsupported sampler type.")
if num_neg_samples is None:
num_neg_samples = 10
else:
num_neg_samples = int(num_neg_samples)
self._num_neg_samples = num_neg_samples
remote_prefetch = is_sparse
print(
"With sparse mode, if your models has only small parameter prefetch may cause speed down"
)
self._attrs = {
'num_total_classes': int(num_total_classes),
'num_neg_samples': num_neg_samples,
'seed': seed,
'sampler': sampler,
'is_sparse': is_sparse,
'remote_prefetch': remote_prefetch
}
def _build_once(self, input, label, sample_weight=None):
assert isinstance(input, Variable)
assert isinstance(label, Variable)
dim = input.shape[1]
num_true_class = label.shape[1]
self._w = self.create_parameter(
attr=self._param_attr,
shape=[self._num_total_classes, dim],
is_bias=False,
dtype=input.dtype)
if self._bias_attr:
self._b = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_total_classes, 1],
is_bias=True,
dtype=input.dtype)
self._inputs['Bias'] = self._b
self._inputs['Weight'] = self._w
@property
def weight(self):
return self._w
@weight.setter
def weight(self, value):
self._w = value
@property
def bias(self):
return self._b
@bias.setter
def bias(self, value):
self._b = value
def forward(self, input, label, sample_weight=None):
assert isinstance(input, Variable)
assert isinstance(label, Variable)
self._inputs['Input'] = input
self._inputs['Label'] = label
self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []
cost = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
sample_logits = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
sample_labels = self._helper.create_variable_for_type_inference(
dtype=label.dtype)
self._helper.append_op(
type='nce',
inputs=self._inputs,
outputs={
'Cost': cost,
'SampleLogits': sample_logits,
'SampleLabels': sample_labels
},
attrs=self._attrs)
return cost / (self._num_neg_samples + 1)
class PRelu(layers.Layer):
"""
This interface is used to construct a callable object of the ``PRelu`` class.
For more details, refer to code examples.
It implements three activation methods of the ``PRelu`` activation function.
Equation:
.. math::
y = \max(0, x) + \\alpha * \min(0, x)
Parameters:
name_scope(str): The name of this class.
mode (str): The mode for weight sharing. It supports all, channel
and element. all: all elements share same weight
channel:elements in a channel share same weight
element:each element has a weight
param_attr(ParamAttr, optional): The parameter attribute for the learnable
weight (alpha). Default: None.
Attribute:
**weight** (Parameter): the learnable weights of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
inp_np = np.ones([5, 200, 100, 100]).astype('float32')
with fluid.dygraph.guard():
inp_np = to_variable(inp_np)
mode = 'channel'
prelu = fluid.PRelu(
'prelu',
mode=mode,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt = prelu(inp_np)
"""
def __init__(self, name_scope, mode, param_attr=None):
super(PRelu, self).__init__(name_scope)
self._mode = mode
self._param_attr = param_attr
if self._mode not in ['all', 'channel', 'element']:
raise ValueError('mode should be one of all, channel, element.')
self._alpha_shape = [1]
def _build_once(self, input):
if self._mode == 'channel':
self._alpha_shape = [1, input.shape[1], 1, 1]
elif self._mode == 'element':
self._alpha_shape = input.shape
self._dtype = self._helper.input_dtype(input)
self._alpha = self.create_parameter(
attr=self._param_attr,
shape=self._alpha_shape,
dtype='float32',
is_bias=False,
default_initializer=Constant(1.0))
@property
def weight(self):
return self._alpha
@weight.setter
def weight(self, value):
self._alpha = value
def forward(self, input):
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="prelu",
inputs={"X": input,
'Alpha': self._alpha},
attrs={"mode": self._mode},
outputs={"Out": out})
return out
class BilinearTensorProduct(layers.Layer):
"""
**Add Bilinear Tensor Product Layer**
This layer performs bilinear tensor product on two inputs.
For example:
.. math::
out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1
In this formula:
- :math:`x`: the first input contains M elements, shape is [batch_size, M].
- :math:`y`: the second input contains N elements, shape is [batch_size, N].
- :math:`W_{i}`: the i-th learned weight, shape is [M, N]
- :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
- :math:`y^\mathrm{T}`: the transpose of :math:`y`.
Parameters:
name_scope(str): The name of this class.
size (int): The dimension of this layer.
name (str): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`.
act (str, optional): Activation to be applied to the output of this layer. The default value is None.
param_attr (ParamAttr, optional): The parameter attribute for the learnable w, parameters/weights of
this layer. The default value is None.
bias_attr (ParamAttr, optional): The parameter attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. The default value is None.
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
Variable: A 2-D Tensor of shape [batch_size, size].
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
layer1 = numpy.random.random((5, 5)).astype('float32')
layer2 = numpy.random.random((5, 4)).astype('float32')
bilinearTensorProduct = fluid.dygraph.nn.BilinearTensorProduct(
'BilinearTensorProduct', size=1000)
ret = bilinearTensorProduct(fluid.dygraph.base.to_variable(layer1),
fluid.dygraph.base.to_variable(layer2))
"""
def __init__(self,
name_scope,
size,
name=None,
act=None,
param_attr=None,
bias_attr=None):
super(BilinearTensorProduct, self).__init__(name_scope)
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._size = size
self._name = name
self._inputs = dict()
def _build_once(self, x, y):
self._dtype = self._helper.input_dtype(x)
param_shape = [self._size, x.shape[1], y.shape[1]]
self._w = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=False)
bias_size = [1, self._size]
self._bias_param = self.create_parameter(
attr=self._bias_attr,
shape=bias_size,
dtype=self._dtype,
is_bias=True)
@property
def weight(self):
return self._w
@weight.setter
def weight(self, value):
self._w = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, x, y):
self._inputs = {"X": x, "Y": y, "Weight": self._w}
if self._bias_param:
self._inputs["Bias"] = self._bias_param
if self._name is not None:
out = self._helper.create_variable(
name=".".join([self.full_name(), self._name]),
dtype=self._dtype,
persistable=False)
else:
out = self._helper.create_variable(
dtype=self._dtype, persistable=False)
self._helper.append_op(
type="bilinear_tensor_product",
inputs=self._inputs,
outputs={"Out": out})
# add activation
return self._helper.append_activation(out, act=self._act)
class Conv2DTranspose(layers.Layer):
"""
This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
For more details, refer to code examples.
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input and output
    are in NCHW format, where N is the batch size, C is the number of feature maps,
    H is the height of the feature map, and W is the width of the feature map.
    The filter's shape is [MCHW], where M is the number of input feature maps,
    C is the number of output feature maps, H is the height of the filter,
    and W is the width of the filter. If groups is greater than 1,
    C will equal the number of input feature maps divided by groups.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
The details of convolution transpose layer, please refer to the following explanation and references
`conv2dtranspose <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_ .
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
Parameters:
name_scope(str): The name of this class.
        num_filters(int): The number of filters. It is the same as the number of
            output feature maps.
output_size(int or tuple, optional): The output image size. If output size is a
tuple, it must contain two integers, (image_H, image_W). None if use
filter_size, padding, and stride to calculate output_size.
            If output_size and filter_size are specified at the same time, they
should follow the formula above. Default: None.
filter_size(int or tuple, optional): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square. None if use output size to
calculate filter_size. Default: None.
padding(int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: 0.
stride(int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: 1.
dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
data = np.random.random((3, 32, 32, 5)).astype('float32')
conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose(
'Conv2DTranspose', num_filters=2, filter_size=3)
ret = conv2DTranspose(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
name_scope,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None):
super(Conv2DTranspose, self).__init__(name_scope)
assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._groups = groups
self._num_filters = num_filters
self._use_cudnn = use_cudnn
self._padding = padding
self._stride = stride
self._dilation = dilation
self._filter_size = filter_size
self._output_size = output_size
self._op_type = 'conv2d_transpose'
def _build_once(self, input):
input_channel = input.shape[1]
if (input_channel == self._groups and
self._num_filters == input_channel and not self._use_cudnn):
self._op_type = 'depthwise_conv2d_transpose'
if not isinstance(input, Variable):
raise TypeError("Input of conv2d_transpose must be Variable")
self._padding = utils.convert_to_list(self._padding, 2, 'padding')
self._stride = utils.convert_to_list(self._stride, 2, 'stride')
self._dilation = utils.convert_to_list(self._dilation, 2, 'dilation')
if not isinstance(self._use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
if self._filter_size is None:
if self._output_size is None:
raise ValueError(
"output_size must be set when filter_size is None")
if isinstance(self._output_size, int):
self._output_size = [self._output_size, self._output_size]
h_in = input.shape[2]
w_in = input.shape[3]
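            # Infer the filter size by inverting the output-size formula from the
            # class docstring:
            #   H_out = (H_in - 1) * stride - 2 * padding + dilation * (H_f - 1) + 1
            # solved for H_f (and likewise for W_f).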
filter_size_h = (self._output_size[0] -
(h_in - 1) * self._stride[0] + 2 * self._padding[0]
- 1) // self._dilation[0] + 1
filter_size_w = (self._output_size[1] -
(w_in - 1) * self._stride[1] + 2 * self._padding[1]
- 1) // self._dilation[1] + 1
self._filter_size = [filter_size_h, filter_size_w]
else:
self._filter_size = utils.convert_to_list(
self._filter_size, 2, 'conv2d_transpose.filter_size')
if self._output_size is None:
self._output_size = []
elif isinstance(self._output_size, list) or isinstance(
self._output_size, int):
self._output_size = utils.convert_to_list(self._output_size, 2,
'output_size')
else:
raise ValueError("output_size should be list or int")
self._padding = utils.convert_to_list(self._padding, 2, 'padding')
self._groups = 1 if self._groups is None else self._groups
filter_shape = [input_channel, self._num_filters // self._groups
] + self._filter_size
self._img_filter = self.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=self._param_attr)
self._bias_param = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
@property
def weight(self):
return self._img_filter
@weight.setter
def weight(self, value):
self._img_filter = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
self._helper.append_op(
type=self._op_type,
inputs={'Input': [input],
'Filter': [self._img_filter]},
outputs={'Output': pre_bias},
attrs={
'output_size': self._output_size,
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups,
'use_cudnn': self._use_cudnn
})
if self._bias_param is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self._bias_param]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
out = self._helper.append_activation(pre_act, act=self._act)
return out
class SequenceConv(layers.Layer):
"""
This function creates the op for sequence_conv, using the inputs and
other convolutional configurations for the filters and stride as given
in the input parameters to the function.
Parameters:
name_scope(str): The name of this class.
num_filters (int): number of filters.
filter_size (int): the filter size (H and W). Default: 3.
filter_stride (int): stride of the filter. Default: 1.
padding (bool|None): if True, add paddings. Default: None
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of sequence_conv.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, sequence_conv
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of sequence_conv. If it is set to None or one attribute of ParamAttr, sequence_conv
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
Attributes:
weight (Parameter): the learnable weights of filters of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: output of sequence_conv
"""
def __init__(self,
name_scope,
num_filters,
filter_size=3,
filter_stride=1,
padding=None,
bias_attr=None,
param_attr=None,
act=None):
assert not in_dygraph_mode(
), "SequenceConv is not supported by dynamic graph mode yet!"
super(SequenceConv, self).__init__(name_scope)
self._num_filters = num_filters
self._filter_size = filter_size
self._filter_stride = filter_stride
self._padding = padding
self._bias_attr = bias_attr
self._param_attr = param_attr
self._act = act
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
filter_shape = [self._filter_size * input.shape[1], self._num_filters]
self._filter_param = self.create_parameter(
attr=self._param_attr, shape=filter_shape, dtype=self._dtype)
self._bias_param = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='sequence_conv',
inputs={
'X': [input],
'Filter': [self._filter_param],
},
outputs={"Out": pre_bias},
attrs={
'contextStride': self._filter_stride,
'contextStart': -int(self._filter_size // 2),
'contextLength': self._filter_size
})
if self._bias_param is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self._bias_param]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
return self._helper.append_activation(pre_act, act=self._act)
class RowConv(layers.Layer):
"""
***Row-convolution operator***
The row convolution is called lookahead convolution. This operator was introduced in the following paper for DeepSpeech2:
http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf
The main motivation is that a bidirectional RNN, useful in DeepSpeech like speech models, learns representation for a sequence by performing a
forward and a backward pass through the entire sequence. However, unlike
unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online
and low-latency setting. The lookahead convolution incorporates information
from future subsequences in a computationally efficient manner to improve
unidirectional recurrent neural networks. The row convolution operator is
different from the 1D sequence convolution, and is computed as follows:
    Given an input sequence X of length t and input dimension D, and a filter (W) of size context * D,
    each output time step is a weighted sum of the current input step and the following context - 1 future steps.
More details about row_conv please refer to the design document https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .
Parameters:
name_scope(str): The name of this class.
future_context_size (int): Future context size. Please note, the shape
of convolution kernel is [future_context_size + 1, D].
param_attr (ParamAttr): Attributes of parameters, including
name, initializer etc. Default: None.
act (str): Non-linear activation to be applied to output variable. Default: None.
Attributes:
weight (Parameter): the learnable weights of this layer.
Returns:
        the output (Out) is a LoDTensor, which supports variable time-length input sequences.
        The underlying tensor in this LoDTensor is a matrix with shape T x D, i.e., the same shape as X.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
x = numpy.random.random((16)).astype('float32')
rowConv = fluid.dygraph.nn.RowConv(
'RowConv', future_context_size=2)
ret = rowConv(fluid.dygraph.base.to_variable(x))
"""
def __init__(self,
name_scope,
future_context_size,
param_attr=None,
act=None):
assert not in_dygraph_mode(
), "RowConv is not supported by dynamic graph mode yet!"
super(RowConv, self).__init__(name_scope)
self._act = act
self._param_attr = param_attr
self._future_context_size = future_context_size
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
filter_shape = [self._future_context_size + 1, input.shape[1]]
self._filter_param = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
is_bias=False)
def forward(self, input):
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='row_conv',
inputs={'X': [input],
'Filter': [self._filter_param]},
outputs={'Out': [out]})
return self._helper.append_activation(out, act=self._act)
class GroupNorm(layers.Layer):
"""
This interface is used to construct a callable object of the ``GroupNorm`` class.
For more details, refer to code examples.
It implements the function of the Group Normalization Layer.
Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
Parameters:
name_scope(str): The name of this class.
groups(int): The number of groups that divided from channels.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
scale :math:`g`. If it is set to False, no scale will be added to the output units.
            If it is set to None, the scale is initialized to one. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
        act(str, optional): Activation to be applied to the output of group normalization. Default: None.
data_layout(str, optional): Specify the input data format. Only NCHW is supported. Default: NCHW.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = np.random.random((8, 32, 32)).astype('float32')
groupNorm = fluid.dygraph.nn.GroupNorm('GroupNorm', groups=4)
ret = groupNorm(fluid.dygraph.base.to_variable(x))
"""
def __init__(self,
name_scope,
groups,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
data_layout='NCHW'):
super(GroupNorm, self).__init__(name_scope)
self._param_attr = param_attr
self._bias_attr = bias_attr
self._epsilon = epsilon
self._groups = groups
self._act = act
if data_layout != 'NCHW':
raise ValueError("unsupported data layout:" + data_layout)
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
param_shape = [input.shape[1]]
if self._bias_attr:
self._bias = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
if self._param_attr:
self._scale = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
def forward(self, input):
inputs = {'X': input}
if self._bias_attr:
inputs['Bias'] = self._bias
if self._param_attr:
inputs['Scale'] = self._scale
# create output
mean_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
variance_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
group_norm_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type="group_norm",
inputs=inputs,
outputs={
"Y": group_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={"epsilon": self._epsilon,
"groups": self._groups})
return self._helper.append_activation(group_norm_out, self._act)
class SpectralNorm(layers.Layer):
"""
This interface is used to construct a callable object of the ``SpectralNorm`` class.
For more details, refer to code examples. It implements the function of the Spectral Normalization Layer.
This layer calculates the spectral normalization value of weight parameters of
fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
Parameters. Calculations are showed as follows.
Step 1:
Generate vector U in shape of [H], and V in shape of [W].
While H is the :attr:`dim` th dimension of the input weights,
and W is the product result of remaining dimensions.
Step 2:
        :attr:`power_iters` should be a positive integer; do the following
calculations with U and V for :attr:`power_iters` rounds.
.. math::
\mathbf{v} := \\frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}
        \mathbf{u} := \\frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2}
Step 3:
Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.
.. math::
\sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}
\mathbf{W} = \\frac{\mathbf{W}}{\sigma(\mathbf{W})}
Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .
Parameters:
name_scope(str): The name of this class.
dim(int, optional): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.
power_iters(int, optional): The number of power iterations to calculate spectral norm. Default: 1.
eps(float, optional): The epsilon for numerical stability in calculating norms. Default: 1e-12.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = np.random.random((2, 8, 32, 32)).astype('float32')
spectralNorm = fluid.dygraph.nn.SpectralNorm('SpectralNorm', dim=1, power_iters=2)
ret = spectralNorm(fluid.dygraph.base.to_variable(x))
"""
def __init__(self, name_scope, dim=0, power_iters=1, eps=1e-12, name=None):
super(SpectralNorm, self).__init__(name_scope)
self._power_iters = power_iters
self._eps = eps
self._dim = dim
def _build_once(self, weight):
self._dtype = self._helper.input_dtype(weight)
input_shape = weight.shape
h = input_shape[self._dim]
w = np.prod(input_shape) // h
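        # u ([h]) and v ([w]) hold the running left/right singular-vector estimates
        # for the weight reshaped to [h, w]; they are excluded from gradient updates
        # (stop_gradient) and are consumed by the spectral_norm op's power iterations.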
self.u = self.create_parameter(
attr=ParamAttr(),
shape=[h],
dtype=self._dtype,
default_initializer=Normal(0., 1.))
self.u.stop_gradient = True
self.v = self.create_parameter(
attr=ParamAttr(),
shape=[w],
dtype=self._dtype,
default_initializer=Normal(0., 1.))
self.v.stop_gradient = True
def forward(self, weight):
inputs = {'Weight': weight, 'U': self.u, 'V': self.v}
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="spectral_norm",
inputs=inputs,
outputs={"Out": out, },
attrs={
"dim": self._dim,
"power_iters": self._power_iters,
"eps": self._eps,
})
return out
class TreeConv(layers.Layer):
"""
This interface is used to construct a callable object of the ``TreeConv`` class.
For more details, refer to code examples.
Tree-Based Convolution is a kind of convolution based on tree structure.
Tree-Based Convolution is a part of Tree-Based Convolution Neural Network(TBCNN),
which is used to classify tree structures, such as Abstract Syntax Tree.
Tree-Based Convolution proposed a kind of data structure called continuous binary tree,
which regards multiway tree as binary tree.
The paper of Tree-Based Convolution Operator is here: `tree-based convolution <https://arxiv.org/abs/1409.5718v1/>`_ .
Parameters:
name_scope(str): The name of this class.
output_size(int): output feature width.
num_filters(int, optional): number of filters, Default: 1.
max_depth(int, optional): max depth of filters, Default: 2.
act(str, optional): activation function, Default: tanh.
param_attr(ParamAttr, optional): the parameter attribute for the filters, Default: None.
bias_attr(ParamAttr, optional): the parameter attribute for the bias of this layer, Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
nodes_vector = numpy.random.random((1, 10, 5)).astype('float32')
edge_set = numpy.random.random((1, 9, 2)).astype('int32')
treeConv = fluid.dygraph.nn.TreeConv(
'TreeConv', output_size=6, num_filters=1, max_depth=2)
ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set))
"""
def __init__(self,
name_scope,
output_size,
num_filters=1,
max_depth=2,
act='tanh',
param_attr=None,
bias_attr=None,
name=None):
super(TreeConv, self).__init__(name_scope)
self._name = name
self._output_size = output_size
self._act = act
self._max_depth = max_depth
self._num_filters = num_filters
self._bias_attr = bias_attr
self._param_attr = param_attr
def _build_once(self, nodes_vector, edge_set):
assert isinstance(nodes_vector, Variable)
assert isinstance(edge_set, Variable)
self._dtype = self._helper.input_dtype(nodes_vector)
feature_size = nodes_vector.shape[2]
w_shape = [feature_size, 3, self._output_size, self._num_filters]
if self._bias_attr:
self._bias_param = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
self.W = self.create_parameter(
attr=self._param_attr,
shape=w_shape,
dtype=self._dtype,
is_bias=False)
@property
def weight(self):
return self.W
@weight.setter
def weight(self, value):
self.W = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, nodes_vector, edge_set):
if self._name:
out = self.create_variable(
name=self._name, dtype=self._dtype, persistable=False)
else:
out = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='tree_conv',
inputs={
'NodesVector': nodes_vector,
'EdgeSet': edge_set,
'Filter': self.W
},
outputs={'Out': out, },
attrs={'max_depth': self._max_depth})
if self._bias_attr:
pre_activation = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [out],
'Y': [self._bias_param]},
outputs={'Out': [pre_activation]},
attrs={'axis': 1})
else:
pre_activation = out
return self._helper.append_activation(pre_activation, act=self._act)
| avg_line_length: 40.718024 | max_line_length: 277 | alphanum_fraction: 0.595469 |
| hexsha: f1c77b73375905f8b655cb71e310bdc3ffcacfbf | size: 399 | ext: py | lang: Python |
| path: exercise039.py | repo: AlissonRaphael/python_exercises | head_hexsha: 3f1185c4f2fff24c9fa2ffd6b60f90599044c985 | licenses: ["MIT"] | stars: null | issues: null | forks: null |
from datetime import date
nascimento = int(input('Year of birth: '))
atual = date.today().year
idade = atual - nascimento
if idade > 18:
    print('You are {} years old. You should have enlisted {} years ago.'.format(idade, idade-18))
elif idade < 18:
    print('You are {} years old. You will have to enlist in {} years.'.format(idade, 18-idade))
else:
    print('You are 18 years old and must enlist immediately.')
| avg_line_length: 30.692308 | max_line_length: 87 | alphanum_fraction: 0.694236 |
| hexsha: 646d6d3a35529a050a70b4a06b3c16eec7973452 | size: 9,046 | ext: py | lang: Python |
| path: src/pokemaster2/pokemon.py | repo: kipyin/pokemaster2 | head_hexsha: 816af178de1075b5b4b5137dbe1b9d9dd0bdef2a | licenses: ["MIT"] |
| stars: 1 (2022-01-10T17:13:12.000Z to 2022-01-10T17:13:12.000Z) | issues: 22 (2021-12-22T05:21:29.000Z to 2022-03-28T05:26:09.000Z) | forks: null |
"""Base Pokemon."""
import operator
from typing import Callable, Sequence, Type, TypeVar, Union
import attr
from pokemaster2.prng import PRNG
S = TypeVar("S", bound="Stats")
P = TypeVar("P", bound="BasePokemon")
STAT_NAMES = ["hp", "atk", "def_", "spatk", "spdef", "spd"]
STAT_NAMES_FULL = {
"hp": "hp",
"attack": "atk",
"defense": "def_",
"special-attack": "spatk",
"special-defense": "spdef",
"speed": "spd",
}
prng = PRNG()
@attr.s(auto_attribs=True)
class Stats:
"""Generic stats, can be used for Pokemon stats/IV/EV."""
hp: int
atk: int
def_: int
spatk: int
spdef: int
spd: int
def __add__(self: S, other: Union[S, int]) -> S:
"""Pointwise addition."""
return self._make_operator(operator.add, other)
def __sub__(self: S, other: Union[S, int]) -> S:
"""Pointwise subtraction."""
return self._make_operator(operator.sub, other)
def __mul__(self: S, other: Union[S, int]) -> S:
"""Pointwise multiplication."""
return self._make_operator(operator.mul, other)
def __floordiv__(self: S, other: Union[S, int]) -> S:
"""Pointwise floor division."""
return self._make_operator(operator.floordiv, other)
__radd__ = __add__
__rmul__ = __mul__
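    # Example (hypothetical values): Stats(1, 2, 3, 4, 5, 6) + Stats(6, 5, 4, 3, 2, 1)
    # gives Stats(hp=7, atk=7, def_=7, spatk=7, spdef=7, spd=7); scalar forms such as
    # `stats * 2` or `stats // 4` apply the operator to every field.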
def _make_operator(
self: S,
operator: Callable[[int, int], int],
other: Union[S, int],
) -> S:
"""Programmatically create point-wise operators.
Args:
operator: A callable (Real, Real) -> Real.
other: If `other` is a `Stats` instance, then the
operator will be applied point-wisely. If `other` is a
number, then a scalar operation will be applied.
Raises:
TypeError: `other` should be either another `Stats` or `int`.
Returns:
A `Stats` instance.
"""
names = (
"hp",
"atk",
"def_",
"spatk",
"spdef",
"spd",
)
if not isinstance(other, type(self)) and not isinstance(other, int):
raise TypeError(
f"unsupported operand type(s) for {operator}: "
f"'{type(self)}' and '{type(other)}'"
)
result_stats = {}
for stat in names:
if isinstance(other, type(self)):
result_stats[stat] = int(operator(getattr(self, stat), getattr(other, stat)))
elif isinstance(other, int):
result_stats[stat] = int(operator(getattr(self, stat), other))
return self.__class__(**result_stats)
    def validate_iv(self: S) -> bool:
        """Check that each IV is between 0 and 31 (IVs are 5-bit values)."""
        for stat in STAT_NAMES:
            if not 0 <= getattr(self, stat) <= 31:
                raise ValueError(
                    f"The {stat} IV ({getattr(self, stat)}) must be a number "
                    "between 0 and 31 inclusive."
                )
        return True
@classmethod
def create_iv(cls: Type[S], gene: int) -> S:
"""Create IV stats from a Pokémon's gene.
Args:
gene: An `int` generated by the PRNG.
Returns:
A `Stats` instance.
"""
return cls(
hp=gene % 32,
atk=(gene >> 5) % 32,
def_=(gene >> 10) % 32,
spd=(gene >> 16) % 32,
spatk=(gene >> 21) % 32,
spdef=(gene >> 26) % 32,
)
@classmethod
def zeros(cls: Type[S]) -> S:
"""Empty Stats."""
return cls(
hp=0,
atk=0,
def_=0,
spatk=0,
spdef=0,
spd=0,
)
    @classmethod
    def nature_modifiers(cls: Type[S], nature: str) -> S:
        """Generate nature modifier Stats."""
        # nature_data = _db.get_nature(identifier=nature)
        # modifiers = {}
        # for stat in STAT_NAMES:
        #     modifiers[stat] = 1
        # if nature_data.is_neutral:
        #     return cls(**modifiers)
        # modifiers[STAT_NAMES_FULL[nature_data.increased_stat.identifier]] = 1.1
        # modifiers[STAT_NAMES_FULL[nature_data.decreased_stat.identifier]] = 0.9
        # return cls(**modifiers)
        # NOTE: the db-backed lookup above is not wired up yet, so fall back to
        # neutral modifiers (all ones) to keep `_calc_stats` below runnable.
        return cls(hp=1, atk=1, def_=1, spatk=1, spdef=1, spd=1)
@attr.s(auto_attribs=True)
class BasePokemon:
"""The underlying structure of a Pokémon.
No fancy initializations, no consistency checks, just a very basic
Pokémon model. Anything is possible with this BasePokemon. This
class also contains common and basic behaviors of Pokémon, such as
leveling-up, learning/forgetting moves, evolving into another
Pokémon, etc.
This class is never meant to be instantiated directly.
"""
national_id: int
species: str
types: Sequence[str]
item_held: str
exp: int
level: int
base_stats: Stats
iv: Stats
current_stats: Stats
stats: Stats
ev: Stats
# move_set = Mapping[int, Mapping[str, Union[str, int]]]
pid: str
gender: str
nature: str
ability: str
# def evolve(self: P) -> None:
# """
# Evolve into another Pokémon.
# 1. Statistics are updated.
# 2. Learnset is updated.
# 3. Evolution tree is updated.
# Returns:
# Nothing
# """
# pass
# def level_up(self: P) -> None:
# """Increase `Pokemon`'s level by 1.
# Returns:
# Nothing
# """
# pass
# @classmethod
# def _from_pokedex_by_id(
# cls: "BasePokemon",
# national_id: int,
# level: int,
# item_held: str = None,
# iv: Stats = None,
# ev: Stats = None,
# pid: int = None,
# nature: str = None,
# ability: str = None,
# gender: str = None,
# ) -> "BasePokemon":
# """Instantiate a `BasePokemon` by its national id.
# Everything else is randomized.
# Args:
# national_id: the Pokemon's ID in the National Pokedex.
# level: Pokemon's level.
# item_held: Pokemon's holding item.
# iv: Pokemon's individual values, `Stats`, used to determine its permanent stats.
# A random IV will be set if not provided.
# ev: Pokemon's effort values, `Stats`, used to determine its permanent stats. An
# all-zero ev will be set if not provided.
# pid: Pokemon's personality id. `nature`, `ability`, and `gender` will use
# their provided value first. A random `pid` will be set if not provided.
# nature: Pokemon's nature, used to determine its permanent stats. If nothing is
# provided, then the function will use `pid` to determine its `nature`.
# ability: Pokemon's ability, `str`. If nothing is provided, then the function
# will use `pid` to determine its `nature`.
# gender: Pokemon's gender. If nothing is provided, then the function will use
# `pid` to determine its `nature`.
# Returns:
# A `BasePokemon` instance.
# """
# # Build pokemon data
# pokemon_data = _db.get_pokemon(national_id=national_id)
# growth_data = _db.get_experience(national_id=national_id, level=level)
# species_data = pokemon_data.species
# species = species_data.identifier
# # Determine stats
# gene = prng.create_gene()
# iv = iv or Stats.create_iv(gene=gene)
# ev = ev or Stats.zeros()
# base_stats = {}
# for i, stat in enumerate(STAT_NAMES):
# base_stats[stat] = pokemon_data.stats[i].base_stat
# stats = _calc_stats(level=level, base_stats=base_stats, iv=iv, ev=ev, nature=nature)
# current_stats = stats
# # PID related attributes
# pid = pid or prng.create_personality()
# nature = nature or _db.get_nature(pid).identifier
# ability = ability or _db.get_ability(species=species, personality=pid).identifier
# gender = gender or _db.get_pokemon_gender(species=species, personality=pid).identifier
# return cls(
# pid=pid,
# national_id=species_data.id,
# species=species,
# types=list(map(lambda x: x.identifier, pokemon_data.types)),
# item_held=item_held,
# exp=growth_data.experience,
# level=growth_data.level,
# stats=stats,
# current_stats=current_stats,
# ev=ev,
# iv=iv,
# nature=nature,
# ability=ability,
# gender=gender,
# )
def _calc_stats(level: int, base_stats: Stats, iv: Stats, ev: Stats, nature: str) -> Stats:
"""Calculate the Pokemon's stats."""
nature_modifiers = Stats.nature_modifiers(nature)
residual_stats = Stats(
hp=10 + level,
atk=5,
def_=5,
spatk=5,
spdef=5,
spd=5,
)
stats = ((base_stats * 2 + iv + ev // 4) * level // 100 + residual_stats) * nature_modifiers
if base_stats.hp == 1:
stats.hp = 1
return stats
| avg_line_length: 30.254181 | max_line_length: 96 | alphanum_fraction: 0.563564 |
| hexsha: 452c3d28cfd8333e8e081d61b52b92087f54192f | size: 7,320 | ext: py | lang: Python |
| path: metrics/ir_metric.py | repo: zhongerqiandan/OpenDialog | head_hexsha: f478b2a912c8c742da5ced510ac40da59217ddb3 | licenses: ["MIT"] |
| stars: 98 (2020-07-16T06:27:29.000Z to 2022-03-12T15:21:51.000Z) | issues: 2 (2020-07-22T12:00:17.000Z to 2021-02-24T01:19:14.000Z) |
| forks: 19 (2020-07-16T08:36:09.000Z to 2021-09-14T05:36:54.000Z) | forks_repo: gmftbyGMFTBY/OpenDialog | forks_head_hexsha: 8eb56b7a21cea1172131db7a56d2656364144771 | licenses: ["MIT"] |
import numpy as np
# import math
def to_relevance_scores(y_true, y_pred):
"""
Returns a list of relevance scores (binary), which can be used in various
information retrieval metrics
:param y_true: a list of relevant items (such as document ids)
:param y_pred: a list of predicted items
:return: a list of binary relevance scores, ex. [1, 0, 1, 1, 0, ...]
"""
rel_scores = [0] * len(y_pred)
for i, d in enumerate(y_pred):
if d in y_true and d not in y_pred[:i]:
rel_scores[i] = 1
return rel_scores
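# For example (hypothetical inputs), y_true=['a', 'c'] and y_pred=['c', 'b', 'a', 'c']
# yield [1, 0, 1, 0]: the trailing duplicate 'c' does not count a second time.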
def precision_at_k(y_true, y_pred, k=None):
"""
The fraction of the documents retrieved that are relevant at position k;
:param y_true: list
A list of ground truth elements (order is not counted)
:param y_pred: list
A list of predicted elements (order does matter)
:param k: int (optional)
Number of results to consider
If k is none, k is the length of the given list of relevance scores.
:return: double
The precision score over the input lists at position k
"""
rel_scores = to_relevance_scores(y_true, y_pred)
if k is None:
k = len(rel_scores)
rel_scores = np.asarray(rel_scores)[:k]
return np.mean(rel_scores) if len(rel_scores) > 0 else 0
def recall(y_true, y_pred, cutoffs=None, rel_scores=None, min_rel_level=1):
"""
The fraction of the relevant documents that are successfully retrieved
measured at various cutoffs.
:param y_true: list
A list of ground truth elements (order is not counted)
:param y_pred: list
A list of predicted elements (order does matter)
:param cutoffs: list (optional)
A list of cutoff positions at where the recall is calculated
:param rel_scores: list (optional)
A list of relevance scores; If provided, this is used instead of
binary relevance scores calculated from y_true and y_pred
:param min_rel_level: int (optional)
Relevance score level that is used to count items which can be
considered as relevant
:return: list
A list of pairs such that (cutoff position, recall)
"""
if rel_scores is None:
rel_scores = to_relevance_scores(y_true, y_pred)
if cutoffs is None:
cutoffs = [1, 2, 5]
recalls = [[c, 0.] for c in cutoffs]
recall_count = 0
cutoff_idx = 0
for i, rel in enumerate(rel_scores):
if i == cutoffs[cutoff_idx]:
recalls[cutoff_idx][1] = recall_count / len(y_true)
cutoff_idx += 1
if cutoff_idx == len(cutoffs):
break
if rel >= min_rel_level:
recall_count += 1
# Assign recalls for the rest of the cutoffs if exist
while cutoff_idx < len(cutoffs):
recalls[cutoff_idx][1] = recall_count / len(y_true)
cutoff_idx += 1
return recalls
def f_measure(y_true, y_pred, beta=1):
"""
The weighted harmonic mean of precision and recall.
:param y_true: list
A list of ground truth elements (order is not counted)
:param y_pred: list
A list of predicted elements (order does matter)
:param beta: non-negative real number, balancing factor
    :return: double
        The F-measure, i.e. the weighted harmonic mean of precision and recall
"""
p = precision_at_k(y_true, y_pred)
recalls = recall(y_true, y_pred)
r = recalls[-1][1]
b2 = beta ** 2
if p == 0 and r == 0:
f = 0
else:
        f = (1 + b2) * p * r / (b2 * (p + r))
return f
def avg_precision_at_k(y_true, y_pred, k=None):
"""
Average Precision at k between two lists
:param y_true: list
A list of ground truth elements (order is not counted)
:param y_pred: list
A list of predicted elements (order does matter)
:param k: int, optional
The maximum number of predicted elements
:return: double
The average precision at k over the input lists
"""
rel_scores = to_relevance_scores(y_true, y_pred)
if k is None:
k = len(rel_scores)
rel_scores = np.asarray(rel_scores)[:k]
    out = [precision_at_k(y_true, y_pred, i + 1) for i in range(rel_scores.size) if rel_scores[i]]
if not out:
return 0.
return np.mean(out)
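# A worked example of the average-precision computation above (illustrative
# values, not part of the original module): with y_true = ['a', 'c'] and
# y_pred = ['a', 'b', 'c'], the relevance scores are [1, 0, 1], so
#   AP = mean(P@1, P@3) = mean(1.0, 2/3) ~ 0.833
# (precision is only evaluated at the positions of relevant items).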
def mean_avg_precision_at_k(y_true, y_pred, k=10):
"""
Mean Average Precision at k between two lists
:param y_true: list
A list of lists of ground truth elements
:param y_pred: list
A list of lists of predicted elements
:param k: int, optional
The maximum number of predicted elements
:return: double
The mean average precision at k (MAP) over the input lists
"""
return np.mean([avg_precision_at_k(t, p, k)
for t, p in zip(y_true, y_pred)])
def dcg(y_true, y_pred, rel_scores=None, k=None):
"""
DCG (Discounted Cumulative Gain) -- measures the gain ("usefulness")
based on the position of a document in the result list. Here, we are
using the alternative formula which places stronger emphasis on retrieving
relevant documents.
description (https://goo.gl/5yPVU7)
DCG_p = \sum_{i=1}^p \frac{2^{rel_i}-1}{\log_2(i+1)}
"""
if rel_scores is None:
rel_scores = to_relevance_scores(y_true, y_pred)
if k is None:
k = len(rel_scores)
rel_scores = np.asarray(rel_scores)[:k]
    # rel_scores may have been truncated to the top-k positions, so it can be
    # shorter than y_pred; no length check is needed here.
if len(rel_scores) > 0:
return np.sum((np.exp2(rel_scores) - 1) /
np.log2(np.arange(2, rel_scores.size + 2)))
else:
return 0.
def ndcg(y_true, y_pred, rel_scores=None, k=None):
    """
    NDCG (Normalized Discounted Cumulative Gain) -- DCG normalized by the
    ideal DCG obtained when the relevance scores are sorted in decreasing order.
    """
    if rel_scores is None:
        rel_scores = to_relevance_scores(y_true, y_pred)
    if k is None:
        k = len(rel_scores)
    rel_scores = np.asarray(rel_scores)[:k]
    if len(rel_scores) == 0:
        return 0.
    rel_scores_r = sorted(rel_scores, reverse=True)
    idcg = np.sum((np.exp2(rel_scores_r) - 1) / np.log2(np.arange(2, len(rel_scores_r) + 2)))
    if idcg == 0:
        return 0.
    # Reuse the (possibly truncated) relevance scores instead of recomputing them.
    return dcg(y_true, y_pred, rel_scores=rel_scores, k=k) / idcg
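# Worked example of the DCG/NDCG formulas above (illustrative values, not part
# of the original module): for binary relevance scores [1, 0, 1],
#   DCG  = (2^1 - 1)/log2(2) + (2^0 - 1)/log2(3) + (2^1 - 1)/log2(4)
#        = 1.0 + 0.0 + 0.5 = 1.5
# The ideal ordering of the scores is [1, 1, 0], so
#   IDCG = 1.0 + 1/log2(3) ~ 1.631  and  NDCG = 1.5 / 1.631 ~ 0.92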
def reciprocal_rank(y_true, y_pred):
    '''
    Reciprocal rank of the first relevant item in y_pred, using 1-based ranks.
    Returns 0 if none of the predicted items is relevant.
    '''
    rank = 0
    for idx, i in enumerate(y_pred):
        if i in y_true:
            rank = idx + 1
            break
    return 1 / rank if rank else 0
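# Quick sanity check of the 1-based rank convention used above (assumed,
# illustrative inputs):
#   reciprocal_rank(['a'], ['a', 'b', 'c'])  -> 1.0   (hit at rank 1)
#   reciprocal_rank(['a'], ['b', 'a', 'c'])  -> 0.5   (hit at rank 2)
#   reciprocal_rank(['a'], ['b', 'c', 'd'])  -> 0     (no relevant item retrieved)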
def mean_reciprocal_rank(y_true, y_pred):
return np.mean([reciprocal_rank(t, p) for t, p in zip(y_true, y_pred)])
if __name__ == "__main__":
# y_true = list('a')
# y_pred = list('czbajepgard')
y_true = list('a')
y_pred = list('bacd')
rel_scores = to_relevance_scores(y_true, y_pred)
print(f'rel_scores: {rel_scores}')
print(f'P@K: {precision_at_k(y_true, y_pred)}')
print(f'recall: {recall(y_true, y_pred, cutoffs=[1])}')
print(f'avg_prec at 1: {avg_precision_at_k(y_true, y_pred, k=1)}')
print(f'avg_prec at 3: {avg_precision_at_k(y_true, y_pred, k=3)}')
print(f'avg_prec at 5: {avg_precision_at_k(y_true, y_pred, k=5)}')
print(f'avg_prec: {avg_precision_at_k(y_true, y_pred)}')
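    # Additional spot checks added for illustration; they use only the
    # single-list y_true/y_pred defined above.
    print(f'f_measure: {f_measure(y_true, y_pred)}')
    print(f'DCG: {dcg(y_true, y_pred)}')
    print(f'NDCG: {ndcg(y_true, y_pred)}')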
y_true = [list('a'), list('n')]
y_pred = [list('czbajepgard'), list('ahbcnfgmdek')]
print(f'mean avg_prec: {mean_avg_precision_at_k(y_true, y_pred)}')
print(f'MRR: {mean_reciprocal_rank(y_true, y_pred)}')
| 32.533333
| 98
| 0.639891
|
8a7deb4a905015c26846bf5809c8126d2fee43f4
| 66,336
|
py
|
Python
|
source/browseMode.py
|
gauravshaha/nvda
|
4d6790378e18b6350d38ab2a052500e61ee41bfa
|
[
"bzip2-1.0.6"
] | 1
|
2019-10-26T04:13:35.000Z
|
2019-10-26T04:13:35.000Z
|
source/browseMode.py
|
gauravshaha/nvda
|
4d6790378e18b6350d38ab2a052500e61ee41bfa
|
[
"bzip2-1.0.6"
] | null | null | null |
source/browseMode.py
|
gauravshaha/nvda
|
4d6790378e18b6350d38ab2a052500e61ee41bfa
|
[
"bzip2-1.0.6"
] | null | null | null |
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2007-2016 NV Access Limited
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import itertools
import collections
import winsound
import time
import weakref
import wx
from logHandler import log
import review
import scriptHandler
import eventHandler
import nvwave
import queueHandler
import gui
import ui
import cursorManager
from scriptHandler import isScriptWaiting, willSayAllResume
import aria
import controlTypes
import config
import textInfos
import braille
import speech
import sayAllHandler
import treeInterceptorHandler
import inputCore
import api
from NVDAObjects import NVDAObject
REASON_QUICKNAV = "quickNav"
def reportPassThrough(treeInterceptor,onlyIfChanged=True):
"""Reports the pass through mode if it has changed.
@param treeInterceptor: The current Browse Mode treeInterceptor.
@type treeInterceptor: L{BrowseModeTreeInterceptor}
	@param onlyIfChanged: if true, reporting will not happen if the last reportPassThrough reported the same thing.
@type onlyIfChanged: bool
"""
if not onlyIfChanged or treeInterceptor.passThrough != reportPassThrough.last:
if config.conf["virtualBuffers"]["passThroughAudioIndication"]:
sound = r"waves\focusMode.wav" if treeInterceptor.passThrough else r"waves\browseMode.wav"
nvwave.playWaveFile(sound)
else:
if treeInterceptor.passThrough:
ui.message(_("Focus mode"))
else:
ui.message(_("Browse mode"))
reportPassThrough.last = treeInterceptor.passThrough
reportPassThrough.last = False
def mergeQuickNavItemIterators(iterators,direction="next"):
"""
Merges multiple iterators that emit L{QuickNavItem} objects, yielding them from first to last.
They are sorted using min or max (__lt__ should be implemented on the L{QuickNavItem} objects).
	@param iterators: the iterators you want to merge.
	@type iterators: sequence of iterators that emit L{QuickNavItem} objects.
	@param direction: the direction these iterators are searching (e.g. next, previous)
@type direction: string
"""
finder=min if direction=="next" else max
curValues=[]
	# Populate a list with all iterators and their corresponding first value
for it in iterators:
try:
val=next(it)
except StopIteration:
continue
curValues.append((it,val))
# Until all iterators have been used up,
# Find the first (minimum or maximum) of all the values,
	# emit that, and update the list with the next available value for the iterator whose value was emitted.
while len(curValues)>0:
first=finder(curValues,key=lambda x: x[1])
curValues.remove(first)
it,val=first
yield val
try:
newVal=next(it)
except StopIteration:
continue
curValues.append((it,newVal))
class QuickNavItem(object):
""" Emitted by L{BrowseModeTreeInterceptor._iterNodesByType}, this represents one of many positions in a browse mode document, based on the type of item being searched for (e.g. link, heading, table etc)."""
itemType=None #: The type of items searched for (e.g. link, heading, table etc)
label=None #: The label that should represent this item in the Elements list.
	isAfterSelection=False #: Is this item positioned after the caret in the document? Used by the elements list to place its own selection.
def __init__(self,itemType,document):
"""
@param itemType: the type that was searched for (e.g. link, heading, table etc)
		@type itemType: string
		@param document: the browse mode document this item is a part of.
@type document: L{BrowseModeTreeInterceptor}
"""
self.itemType=itemType
self.document=document
def isChild(self,parent):
"""
Is this item a child of the given parent?
This is used when representing items in a hierarchical tree structure, such as the Elements List.
@param parent: the item of whom this item may be a child of.
@type parent: L{QuickNavItem}
@return: True if this item is a child, false otherwise.
@rtype: bool
"""
raise NotImplementedError
def report(self,readUnit=None):
"""
Reports the contents of this item.
		@param readUnit: the optional unit (e.g. line, paragraph) that should be used to announce the item position when moved to. If not given, then the full size of the item is used.
@type readUnit: a L{textInfos}.UNIT_* constant.
"""
raise NotImplementedError
def moveTo(self):
"""
Moves the browse mode caret or focus to this item.
"""
raise NotImplementedError
def activate(self):
"""
Activates this item's position. E.g. follows a link, presses a button etc.
"""
raise NotImplementedError
def rename(self,newName):
"""
Renames this item with the new name.
"""
raise NotImplementedError
@property
def isRenameAllowed(self):
return False
class TextInfoQuickNavItem(QuickNavItem):
""" Represents a quick nav item in a browse mode document who's positions are represented by a L{textInfos.TextInfo}. """
def __init__(self,itemType,document,textInfo):
"""
See L{QuickNavItem.__init__} for itemType and document argument definitions.
@param textInfo: the textInfo position this item represents.
@type textInfo: L{textInfos.TextInfo}
"""
self.textInfo=textInfo
super(TextInfoQuickNavItem,self).__init__(itemType,document)
def __lt__(self,other):
return self.textInfo.compareEndPoints(other.textInfo,"startToStart")<0
@property
def obj(self):
return self.textInfo.basePosition if isinstance(self.textInfo.basePosition,NVDAObject) else None
@property
def label(self):
return self.textInfo.text.strip()
def isChild(self,parent):
if parent.textInfo.isOverlapping(self.textInfo):
return True
return False
def report(self,readUnit=None):
info=self.textInfo
if readUnit:
fieldInfo = info.copy()
info.collapse()
info.move(readUnit, 1, endPoint="end")
if info.compareEndPoints(fieldInfo, "endToEnd") > 0:
# We've expanded past the end of the field, so limit to the end of the field.
info.setEndPoint(fieldInfo, "endToEnd")
speech.speakTextInfo(info, reason=controlTypes.REASON_FOCUS)
def activate(self):
self.textInfo.obj._activatePosition(self.textInfo)
def moveTo(self):
info=self.textInfo.copy()
info.collapse()
self.document._set_selection(info,reason=REASON_QUICKNAV)
@property
def isAfterSelection(self):
caret=self.document.makeTextInfo(textInfos.POSITION_CARET)
return self.textInfo.compareEndPoints(caret, "startToStart") > 0
class BrowseModeTreeInterceptor(treeInterceptorHandler.TreeInterceptor):
scriptCategory = inputCore.SCRCAT_BROWSEMODE
def _get_shouldTrapNonCommandGestures(self):
return config.conf['virtualBuffers']['trapNonCommandGestures']
def script_trapNonCommandGesture(self,gesture):
winsound.PlaySound("default",1)
singleLetterNavEnabled=True #: Whether single letter navigation scripts should be active (true) or if these letters should fall to the application.
def getAlternativeScript(self,gesture,script):
if self.passThrough or not gesture.isCharacter:
return script
if not self.singleLetterNavEnabled:
return None
if not script and self.shouldTrapNonCommandGestures:
script=self.script_trapNonCommandGesture
return script
def script_toggleSingleLetterNav(self,gesture):
if self.singleLetterNavEnabled:
self.singleLetterNavEnabled=False
# Translators: Reported when single letter navigation in browse mode is turned off.
ui.message(_("Single letter navigation off"))
else:
self.singleLetterNavEnabled=True
# Translators: Reported when single letter navigation in browse mode is turned on.
ui.message(_("Single letter navigation on"))
# Translators: the description for the toggleSingleLetterNavigation command in browse mode.
script_toggleSingleLetterNav.__doc__=_("Toggles single letter navigation on and off. When on, single letter keys in browse mode jump to various kinds of elements on the page. When off, these keys are passed to the application")
def _get_ElementsListDialog(self):
return ElementsListDialog
def _iterNodesByType(self,itemType,direction="next",pos=None):
"""
Yields L{QuickNavItem} objects representing the ordered positions in this document according to the type being searched for (e.g. link, heading, table etc).
@param itemType: the type being searched for (e.g. link, heading, table etc)
@type itemType: string
@param direction: the direction in which to search (next, previous, up)
		@type direction: string
		@param pos: the position in the document from where to start the search.
@type pos: Usually an L{textInfos.TextInfo}
@raise NotImplementedError: This type is not supported by this BrowseMode implementation
"""
raise NotImplementedError
def _iterNotLinkBlock(self, direction="next", pos=None):
raise NotImplementedError
def _quickNavScript(self,gesture, itemType, direction, errorMessage, readUnit):
if itemType=="notLinkBlock":
iterFactory=self._iterNotLinkBlock
else:
iterFactory=lambda direction,info: self._iterNodesByType(itemType,direction,info)
info=self.selection
try:
item = next(iterFactory(direction, info))
except NotImplementedError:
# Translators: a message when a particular quick nav command is not supported in the current document.
ui.message(_("Not supported in this document"))
return
except StopIteration:
ui.message(errorMessage)
return
item.moveTo()
if not gesture or not willSayAllResume(gesture):
item.report(readUnit=readUnit)
@classmethod
def addQuickNav(cls, itemType, key, nextDoc, nextError, prevDoc, prevError, readUnit=None):
scriptSuffix = itemType[0].upper() + itemType[1:]
scriptName = "next%s" % scriptSuffix
funcName = "script_%s" % scriptName
script = lambda self,gesture: self._quickNavScript(gesture, itemType, "next", nextError, readUnit)
script.__doc__ = nextDoc
script.__name__ = funcName
script.resumeSayAllMode=sayAllHandler.CURSOR_CARET
setattr(cls, funcName, script)
cls.__gestures["kb:%s" % key] = scriptName
scriptName = "previous%s" % scriptSuffix
funcName = "script_%s" % scriptName
script = lambda self,gesture: self._quickNavScript(gesture, itemType, "previous", prevError, readUnit)
script.__doc__ = prevDoc
script.__name__ = funcName
script.resumeSayAllMode=sayAllHandler.CURSOR_CARET
setattr(cls, funcName, script)
cls.__gestures["kb:shift+%s" % key] = scriptName
def script_elementsList(self,gesture):
# We need this to be a modal dialog, but it mustn't block this script.
def run():
gui.mainFrame.prePopup()
d = self.ElementsListDialog(self)
d.ShowModal()
d.Destroy()
gui.mainFrame.postPopup()
wx.CallAfter(run)
# Translators: the description for the Elements List command in browse mode.
script_elementsList.__doc__ = _("Lists various types of elements in this document")
def _activatePosition(self):
raise NotImplementedError
def script_activatePosition(self,gesture):
self._activatePosition()
# Translators: the description for the activatePosition script on browseMode documents.
script_activatePosition.__doc__ = _("Activates the current object in the document")
__gestures={
"kb:NVDA+f7": "elementsList",
"kb:enter": "activatePosition",
"kb:numpadEnter": "activatePosition",
"kb:space": "activatePosition",
"kb:NVDA+shift+space":"toggleSingleLetterNav",
}
# Add quick navigation scripts.
qn = BrowseModeTreeInterceptor.addQuickNav
qn("heading", key="h",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading"))
qn("heading1", key="1",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 1"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 1"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 1"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 1"))
qn("heading2", key="2",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 2"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 2"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 2"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 2"))
qn("heading3", key="3",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 3"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 3"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 3"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 3"))
qn("heading4", key="4",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 4"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 4"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 4"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 4"))
qn("heading5", key="5",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 5"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 5"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 5"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 5"))
qn("heading6", key="6",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 6"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 6"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 6"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 6"))
qn("table", key="t",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next table"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next table"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous table"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous table"),
readUnit=textInfos.UNIT_LINE)
qn("link", key="k",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous link"))
qn("visitedLink", key="v",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next visited link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next visited link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous visited link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous visited link"))
qn("unvisitedLink", key="u",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next unvisited link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next unvisited link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous unvisited link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous unvisited link"))
qn("formField", key="f",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next form field"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next form field"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous form field"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous form field"),
readUnit=textInfos.UNIT_LINE)
qn("list", key="l",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next list"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next list"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous list"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous list"),
readUnit=textInfos.UNIT_LINE)
qn("listItem", key="i",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next list item"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next list item"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous list item"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous list item"))
qn("button", key="b",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next button"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next button"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous button"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous button"))
qn("edit", key="e",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next edit field"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next edit field"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous edit field"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous edit field"),
readUnit=textInfos.UNIT_LINE)
qn("frame", key="m",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next frame"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next frame"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous frame"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous frame"),
readUnit=textInfos.UNIT_LINE)
qn("separator", key="s",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next separator"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next separator"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous separator"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous separator"))
qn("radioButton", key="r",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next radio button"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next radio button"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous radio button"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous radio button"))
qn("comboBox", key="c",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next combo box"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next combo box"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous combo box"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous combo box"))
qn("checkBox", key="x",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next check box"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next check box"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous check box"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous check box"))
qn("graphic", key="g",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next graphic"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next graphic"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous graphic"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous graphic"))
qn("blockQuote", key="q",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next block quote"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next block quote"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous block quote"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous block quote"))
qn("notLinkBlock", key="n",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("skips forward past a block of links"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no more text after a block of links"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("skips backward past a block of links"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no more text before a block of links"),
readUnit=textInfos.UNIT_LINE)
qn("landmark", key="d",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next landmark"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next landmark"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous landmark"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous landmark"),
readUnit=textInfos.UNIT_LINE)
qn("embeddedObject", key="o",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next embedded object"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next embedded object"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous embedded object"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous embedded object"))
qn("annotation", key="a",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next annotation"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next annotation"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous annotation"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous annotation"))
del qn
class ElementsListDialog(wx.Dialog):
ELEMENT_TYPES = (
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("link", _("Lin&ks")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("heading", _("&Headings")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("landmark", _("Lan&dmarks")),
)
Element = collections.namedtuple("Element", ("item", "parent"))
lastSelectedElementType=0
def __init__(self, document):
self.document = document
# Translators: The title of the browse mode Elements List dialog.
super(ElementsListDialog, self).__init__(gui.mainFrame, wx.ID_ANY, _("Elements List"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
# Translators: The label of a group of radio buttons to select the type of element
# in the browse mode Elements List dialog.
child = wx.RadioBox(self, wx.ID_ANY, label=_("Type:"), choices=tuple(et[1] for et in self.ELEMENT_TYPES))
child.SetSelection(self.lastSelectedElementType)
child.Bind(wx.EVT_RADIOBOX, self.onElementTypeChange)
mainSizer.Add(child,proportion=1)
self.tree = wx.TreeCtrl(self, wx.ID_ANY, style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_SINGLE | wx.TR_EDIT_LABELS)
self.tree.Bind(wx.EVT_SET_FOCUS, self.onTreeSetFocus)
self.tree.Bind(wx.EVT_CHAR, self.onTreeChar)
self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.onTreeLabelEditBegin)
self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.onTreeLabelEditEnd)
self.treeRoot = self.tree.AddRoot("root")
mainSizer.Add(self.tree,proportion=7,flag=wx.EXPAND)
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of an editable text field to filter the elements
# in the browse mode Elements List dialog.
label = wx.StaticText(self, wx.ID_ANY, _("&Filter by:"))
sizer.Add(label)
self.filterEdit = wx.TextCtrl(self, wx.ID_ANY)
self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange)
sizer.Add(self.filterEdit)
mainSizer.Add(sizer,proportion=1)
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of a button to activate an element
# in the browse mode Elements List dialog.
self.activateButton = wx.Button(self, wx.ID_ANY, _("&Activate"))
self.activateButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(True))
sizer.Add(self.activateButton)
# Translators: The label of a button to move to an element
# in the browse mode Elements List dialog.
self.moveButton = wx.Button(self, wx.ID_ANY, _("&Move to"))
self.moveButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(False))
sizer.Add(self.moveButton)
sizer.Add(wx.Button(self, wx.ID_CANCEL))
mainSizer.Add(sizer,proportion=1)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.tree.SetFocus()
self.initElementType(self.ELEMENT_TYPES[self.lastSelectedElementType][0])
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
def onElementTypeChange(self, evt):
elementType=evt.GetInt()
# We need to make sure this gets executed after the focus event.
# Otherwise, NVDA doesn't seem to get the event.
queueHandler.queueFunction(queueHandler.eventQueue, self.initElementType, self.ELEMENT_TYPES[elementType][0])
self.lastSelectedElementType=elementType
def initElementType(self, elType):
if elType == "link":
# Links can be activated.
self.activateButton.Enable()
self.SetAffirmativeId(self.activateButton.GetId())
else:
# No other element type can be activated.
self.activateButton.Disable()
self.SetAffirmativeId(self.moveButton.GetId())
# Gather the elements of this type.
self._elements = []
self._initialElement = None
parentElements = []
isAfterSelection=False
for item in self.document._iterNodesByType(elType):
# Find the parent element, if any.
for parent in reversed(parentElements):
if item.isChild(parent.item):
break
else:
# We're not a child of this parent, so this parent has no more children and can be removed from the stack.
parentElements.pop()
else:
# No parent found, so we're at the root.
# Note that parentElements will be empty at this point, as all parents are no longer relevant and have thus been removed from the stack.
parent = None
element=self.Element(item,parent)
self._elements.append(element)
if not isAfterSelection:
isAfterSelection=item.isAfterSelection
if not isAfterSelection:
# The element immediately preceding or overlapping the caret should be the initially selected element.
# Since we have not yet passed the selection, use this as the initial element.
try:
self._initialElement = self._elements[-1]
except IndexError:
# No previous element.
pass
# This could be the parent of a subsequent element, so add it to the parents stack.
parentElements.append(element)
# Start with no filtering.
self.filterEdit.ChangeValue("")
self.filter("", newElementType=True)
def filter(self, filterText, newElementType=False):
# If this is a new element type, use the element nearest the cursor.
# Otherwise, use the currently selected element.
defaultElement = self._initialElement if newElementType else self.tree.GetItemPyData(self.tree.GetSelection())
# Clear the tree.
self.tree.DeleteChildren(self.treeRoot)
# Populate the tree with elements matching the filter text.
elementsToTreeItems = {}
defaultItem = None
matched = False
#Do case-insensitive matching by lowering both filterText and each element's text.
filterText=filterText.lower()
for element in self._elements:
label=element.item.label
if filterText and filterText not in label.lower():
continue
matched = True
parent = element.parent
if parent:
parent = elementsToTreeItems.get(parent)
item = self.tree.AppendItem(parent or self.treeRoot, label)
self.tree.SetItemPyData(item, element)
elementsToTreeItems[element] = item
if element == defaultElement:
defaultItem = item
self.tree.ExpandAll()
if not matched:
# No items, so disable the buttons.
self.activateButton.Disable()
self.moveButton.Disable()
return
# If there's no default item, use the first item in the tree.
self.tree.SelectItem(defaultItem or self.tree.GetFirstChild(self.treeRoot)[0])
# Enable the button(s).
# If the activate button isn't the default button, it is disabled for this element type and shouldn't be enabled here.
if self.AffirmativeId == self.activateButton.Id:
self.activateButton.Enable()
self.moveButton.Enable()
def onTreeSetFocus(self, evt):
# Start with no search.
self._searchText = ""
self._searchCallLater = None
evt.Skip()
def onTreeChar(self, evt):
key = evt.KeyCode
if key == wx.WXK_RETURN:
# The enter key should be propagated to the dialog and thus activate the default button,
# but this is broken (wx ticket #3725).
# Therefore, we must catch the enter key here.
# Activate the current default button.
evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_ANY)
button = self.FindWindowById(self.AffirmativeId)
if button.Enabled:
button.ProcessEvent(evt)
else:
wx.Bell()
elif key == wx.WXK_F2:
item=self.tree.GetSelection()
if item:
selectedItemType=self.tree.GetItemPyData(item).item
self.tree.EditLabel(item)
evt.Skip()
elif key >= wx.WXK_START or key == wx.WXK_BACK:
# Non-printable character.
self._searchText = ""
evt.Skip()
else:
# Search the list.
# We have to implement this ourselves, as tree views don't accept space as a search character.
char = unichr(evt.UnicodeKey).lower()
			# If the same character is typed twice, do the same search.
if self._searchText != char:
self._searchText += char
if self._searchCallLater:
self._searchCallLater.Restart()
else:
self._searchCallLater = wx.CallLater(1000, self._clearSearchText)
self.search(self._searchText)
def onTreeLabelEditBegin(self,evt):
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemPyData(item).item
if not selectedItemType.isRenameAllowed:
evt.Veto()
def onTreeLabelEditEnd(self,evt):
selectedItemNewName=evt.GetLabel()
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemPyData(item).item
selectedItemType.rename(selectedItemNewName)
def _clearSearchText(self):
self._searchText = ""
def search(self, searchText):
item = self.tree.GetSelection()
if not item:
# No items.
return
# First try searching from the current item.
# Failing that, search from the first item.
items = itertools.chain(self._iterReachableTreeItemsFromItem(item), self._iterReachableTreeItemsFromItem(self.tree.GetFirstChild(self.treeRoot)[0]))
if len(searchText) == 1:
# If only a single character has been entered, skip (search after) the current item.
next(items)
for item in items:
if self.tree.GetItemText(item).lower().startswith(searchText):
self.tree.SelectItem(item)
return
# Not found.
wx.Bell()
def _iterReachableTreeItemsFromItem(self, item):
while item:
yield item
childItem = self.tree.GetFirstChild(item)[0]
if childItem and self.tree.IsExpanded(item):
# Has children and is reachable, so recurse.
for childItem in self._iterReachableTreeItemsFromItem(childItem):
yield childItem
item = self.tree.GetNextSibling(item)
def onFilterEditTextChange(self, evt):
self.filter(self.filterEdit.GetValue())
evt.Skip()
def onAction(self, activate):
self.Close()
		# Save off the last selected element type onto the class so it's used in initialization next time.
self.__class__.lastSelectedElementType=self.lastSelectedElementType
item = self.tree.GetSelection()
item = self.tree.GetItemPyData(item).item
if activate:
item.activate()
else:
def move():
speech.cancelSpeech()
item.moveTo()
item.report()
wx.CallLater(100, move)
class BrowseModeDocumentTextInfo(textInfos.TextInfo):
def getControlFieldSpeech(self, attrs, ancestorAttrs, fieldType, formatConfig=None, extraDetail=False, reason=None):
textList = []
landmark = attrs.get("landmark")
if formatConfig["reportLandmarks"] and fieldType == "start_addedToControlFieldStack" and landmark:
try:
textList.append(attrs["name"])
except KeyError:
pass
if landmark == "region":
# The word landmark is superfluous for regions.
textList.append(aria.landmarkRoles[landmark])
else:
textList.append(_("%s landmark") % aria.landmarkRoles[landmark])
textList.append(super(BrowseModeDocumentTextInfo, self).getControlFieldSpeech(attrs, ancestorAttrs, fieldType, formatConfig, extraDetail, reason))
return " ".join(textList)
def getControlFieldBraille(self, field, ancestors, reportStart, formatConfig):
textList = []
landmark = field.get("landmark")
if formatConfig["reportLandmarks"] and reportStart and landmark and field.get("_startOfNode"):
try:
textList.append(field["name"])
except KeyError:
pass
if landmark == "region":
# The word landmark is superfluous for regions.
textList.append(aria.landmarkRoles[landmark])
else:
# Translators: This is spoken and brailled to indicate a landmark (example output: main landmark).
textList.append(_("%s landmark") % aria.landmarkRoles[landmark])
text = super(BrowseModeDocumentTextInfo, self).getControlFieldBraille(field, ancestors, reportStart, formatConfig)
if text:
textList.append(text)
return " ".join(textList)
def _get_focusableNVDAObjectAtStart(self):
try:
item = next(self.obj._iterNodesByType("focusable", "up", self))
except StopIteration:
return self.obj.rootNVDAObject
if not item:
return self.obj.rootNVDAObject
return item.obj
class BrowseModeDocumentTreeInterceptor(cursorManager.CursorManager,BrowseModeTreeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor):
programmaticScrollMayFireEvent = False
def __init__(self,obj):
super(BrowseModeDocumentTreeInterceptor,self).__init__(obj)
self.disableAutoPassThrough = False
self._lastProgrammaticScrollTime = None
self.documentConstantIdentifier = self.documentConstantIdentifier
self._lastFocusObj = None
self._hadFirstGainFocus = False
self._enteringFromOutside = True
# We need to cache this because it will be unavailable once the document dies.
if not hasattr(self.rootNVDAObject.appModule, "_browseModeRememberedCaretPositions"):
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions = {}
self._lastCaretPosition = None
#: True if the last caret move was due to a focus change.
self._lastCaretMoveWasFocus = False
def terminate(self):
if self.shouldRememberCaretPositionAcrossLoads and self._lastCaretPosition:
try:
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] = self._lastCaretPosition
except AttributeError:
# The app module died.
pass
def event_treeInterceptor_gainFocus(self):
"""Triggered when this browse mode document gains focus.
This event is only fired upon entering this treeInterceptor when it was not the current treeInterceptor before.
This is different to L{event_gainFocus}, which is fired when an object inside this treeInterceptor gains focus, even if that object is in the same treeInterceptor.
"""
doSayAll=False
hadFirstGainFocus=self._hadFirstGainFocus
if not hadFirstGainFocus:
# This treeInterceptor is gaining focus for the first time.
# Fake a focus event on the focus object, as the treeInterceptor may have missed the actual focus event.
focus = api.getFocusObject()
self.event_gainFocus(focus, lambda: focus.event_gainFocus())
if not self.passThrough:
# We only set the caret position if in browse mode.
# If in focus mode, the document must have forced the focus somewhere,
# so we don't want to override it.
initialPos = self._getInitialCaretPos()
if initialPos:
self.selection = self.makeTextInfo(initialPos)
reportPassThrough(self)
doSayAll=config.conf['virtualBuffers']['autoSayAllOnPageLoad']
self._hadFirstGainFocus = True
if not self.passThrough:
if doSayAll:
speech.speakObjectProperties(self.rootNVDAObject,name=True,states=True,reason=controlTypes.REASON_FOCUS)
sayAllHandler.readText(sayAllHandler.CURSOR_CARET)
else:
# Speak it like we would speak focus on any other document object.
# This includes when entering the treeInterceptor for the first time:
if not hadFirstGainFocus:
speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS)
else:
# And when coming in from an outside object
# #4069 But not when coming up from a non-rendered descendant.
ancestors=api.getFocusAncestors()
fdl=api.getFocusDifferenceLevel()
try:
tl=ancestors.index(self.rootNVDAObject)
except ValueError:
tl=len(ancestors)
if fdl<=tl:
speech.speakObject(self.rootNVDAObject, reason=controlTypes.REASON_FOCUS)
info = self.selection
if not info.isCollapsed:
speech.speakSelectionMessage(_("selected %s"), info.text)
else:
info.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(info, reason=controlTypes.REASON_CARET, unit=textInfos.UNIT_LINE)
reportPassThrough(self)
braille.handler.handleGainFocus(self)
def event_caret(self, obj, nextHandler):
if self.passThrough:
nextHandler()
def _activateNVDAObject(self, obj):
"""Activate an object in response to a user request.
This should generally perform the default action or click on the object.
@param obj: The object to activate.
@type obj: L{NVDAObjects.NVDAObject}
"""
obj.doAction()
def _activateLongDesc(self,controlField):
"""
Activates (presents) the long description for a particular field (usually a graphic).
		@param controlField: the field whose long description should be activated. This field is guaranteed to have states containing HASLONGDESC state.
@type controlField: dict
"""
raise NotImplementedError
def _activatePosition(self, info=None):
if not info:
info=self.makeTextInfo(textInfos.POSITION_CARET)
obj = info.NVDAObjectAtStart
if not obj:
return
if obj.role == controlTypes.ROLE_MATH:
import mathPres
try:
return mathPres.interactWithMathMl(obj.mathMl)
except (NotImplementedError, LookupError):
pass
return
if self.shouldPassThrough(obj):
obj.setFocus()
self.passThrough = True
reportPassThrough(self)
elif obj.role == controlTypes.ROLE_EMBEDDEDOBJECT or obj.role in self.APPLICATION_ROLES:
obj.setFocus()
speech.speakObject(obj, reason=controlTypes.REASON_FOCUS)
else:
self._activateNVDAObject(obj)
def _set_selection(self, info, reason=controlTypes.REASON_CARET):
super(BrowseModeDocumentTreeInterceptor, self)._set_selection(info)
if isScriptWaiting() or not info.isCollapsed:
return
# Save the last caret position for use in terminate().
# This must be done here because the buffer might be cleared just before terminate() is called,
# causing the last caret position to be lost.
caret = info.copy()
caret.collapse()
self._lastCaretPosition = caret.bookmark
review.handleCaretMove(caret)
if reason == controlTypes.REASON_FOCUS:
self._lastCaretMoveWasFocus = True
focusObj = api.getFocusObject()
if focusObj==self.rootNVDAObject:
return
else:
self._lastCaretMoveWasFocus = False
focusObj=info.focusableNVDAObjectAtStart
obj=info.NVDAObjectAtStart
if not obj:
log.debugWarning("Invalid NVDAObjectAtStart")
return
if obj==self.rootNVDAObject:
return
if focusObj and not eventHandler.isPendingEvents("gainFocus") and focusObj!=self.rootNVDAObject and focusObj != api.getFocusObject() and self._shouldSetFocusToObj(focusObj):
focusObj.setFocus()
obj.scrollIntoView()
if self.programmaticScrollMayFireEvent:
self._lastProgrammaticScrollTime = time.time()
self.passThrough=self.shouldPassThrough(focusObj,reason=reason)
# Queue the reporting of pass through mode so that it will be spoken after the actual content.
queueHandler.queueFunction(queueHandler.eventQueue, reportPassThrough, self)
def _shouldSetFocusToObj(self, obj):
"""Determine whether an object should receive focus.
Subclasses may extend or override this method.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
"""
return obj.role not in self.APPLICATION_ROLES and obj.isFocusable and obj.role!=controlTypes.ROLE_EMBEDDEDOBJECT
def script_activateLongDesc(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand("character")
for field in reversed(info.getTextWithFields()):
if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart":
states=field.field.get('states')
if states and controlTypes.STATE_HASLONGDESC in states:
self._activateLongDesc(field.field)
break
else:
# Translators: the message presented when the activateLongDescription script cannot locate a long description to activate.
ui.message(_("No long description"))
# Translators: the description for the activateLongDescription script on browseMode documents.
script_activateLongDesc.__doc__=_("Shows the long description at this position if one is found.")
def shouldPassThrough(self, obj, reason=None):
"""Determine whether pass through mode should be enabled or disabled for a given object.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@param reason: The reason for this query; one of the output reasons, L{REASON_QUICKNAV}, or C{None} for manual pass through mode activation by the user.
@return: C{True} if pass through mode should be enabled, C{False} if it should be disabled.
"""
if reason and (
self.disableAutoPassThrough
or (reason == controlTypes.REASON_FOCUS and not config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
or (reason == controlTypes.REASON_CARET and not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
):
# This check relates to auto pass through and auto pass through is disabled, so don't change the pass through state.
return self.passThrough
if reason == REASON_QUICKNAV:
return False
states = obj.states
role = obj.role
if controlTypes.STATE_EDITABLE in states and controlTypes.STATE_UNAVAILABLE not in states:
return True
# Menus sometimes get focus due to menuStart events even though they don't report as focused/focusable.
if not obj.isFocusable and controlTypes.STATE_FOCUSED not in states and role != controlTypes.ROLE_POPUPMENU:
return False
# many controls that are read-only should not switch to passThrough.
# However, certain controls such as combo boxes and readonly edits are read-only but still interactive.
# #5118: read-only ARIA grids should also be allowed (focusable table cells, rows and headers).
if controlTypes.STATE_READONLY in states and role not in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_TABLEROW, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER):
return False
if reason == controlTypes.REASON_FOCUS and role in (controlTypes.ROLE_LISTITEM, controlTypes.ROLE_RADIOBUTTON, controlTypes.ROLE_TAB):
return True
if role in (controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_LIST, controlTypes.ROLE_SLIDER, controlTypes.ROLE_TABCONTROL, controlTypes.ROLE_MENUBAR, controlTypes.ROLE_POPUPMENU, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM, controlTypes.ROLE_SPINBUTTON, controlTypes.ROLE_TABLEROW, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER, controlTypes.ROLE_CHECKMENUITEM, controlTypes.ROLE_RADIOMENUITEM) or controlTypes.STATE_EDITABLE in states:
return True
if reason == controlTypes.REASON_FOCUS:
# If this is a focus change, pass through should be enabled for certain ancestor containers.
while obj and obj != self.rootNVDAObject:
if obj.role == controlTypes.ROLE_TOOLBAR:
return True
obj = obj.parent
return False
def event_caretMovementFailed(self, obj, nextHandler, gesture=None):
if not self.passThrough or not gesture or not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]:
return nextHandler()
if gesture.mainKeyName in ("home", "end"):
# Home, end, control+home and control+end should not disable pass through.
return nextHandler()
script = self.getScript(gesture)
if not script:
return nextHandler()
# We've hit the edge of the focused control.
# Therefore, move the virtual caret to the same edge of the field.
info = self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(info.UNIT_CONTROLFIELD)
if gesture.mainKeyName in ("leftArrow", "upArrow", "pageUp"):
info.collapse()
else:
info.collapse(end=True)
info.move(textInfos.UNIT_CHARACTER, -1)
info.updateCaret()
scriptHandler.queueScript(script, gesture)
def script_disablePassThrough(self, gesture):
if not self.passThrough or self.disableAutoPassThrough:
return gesture.send()
self.passThrough = False
self.disableAutoPassThrough = False
reportPassThrough(self)
script_disablePassThrough.ignoreTreeInterceptorPassThrough = True
def script_collapseOrExpandControl(self, gesture):
oldFocus = api.getFocusObject()
oldFocusStates = oldFocus.states
gesture.send()
if controlTypes.STATE_COLLAPSED in oldFocusStates:
self.passThrough = True
elif not self.disableAutoPassThrough:
self.passThrough = False
reportPassThrough(self)
script_collapseOrExpandControl.ignoreTreeInterceptorPassThrough = True
def _tabOverride(self, direction):
"""Override the tab order if the virtual caret is not within the currently focused node.
This is done because many nodes are not focusable and it is thus possible for the virtual caret to be unsynchronised with the focus.
In this case, we want tab/shift+tab to move to the next/previous focusable node relative to the virtual caret.
If the virtual caret is within the focused node, the tab/shift+tab key should be passed through to allow normal tab order navigation.
Note that this method does not pass the key through itself if it is not overridden. This should be done by the calling script if C{False} is returned.
@param direction: The direction in which to move.
@type direction: str
@return: C{True} if the tab order was overridden, C{False} if not.
@rtype: bool
"""
if self._lastCaretMoveWasFocus:
# #5227: If the caret was last moved due to a focus change, don't override tab.
# This ensures that tabbing behaves as expected after tabbing hits an iframe document.
return False
focus = api.getFocusObject()
try:
focusInfo = self.makeTextInfo(focus)
except:
return False
# We only want to override the tab order if the caret is not within the focused node.
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
		#Only check that the caret is within the focus for things that are not documents
#As for documents we should always override
if focus.role!=controlTypes.ROLE_DOCUMENT or controlTypes.STATE_EDITABLE in focus.states:
# Expand to one character, as isOverlapping() doesn't yield the desired results with collapsed ranges.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if focusInfo.isOverlapping(caretInfo):
return False
# If we reach here, we do want to override tab/shift+tab if possible.
# Find the next/previous focusable node.
try:
item = next(self._iterNodesByType("focusable", direction, caretInfo))
except StopIteration:
return False
obj=item.obj
newInfo=item.textInfo
if obj == api.getFocusObject():
# This node is already focused, so we need to move to and speak this node here.
newCaret = newInfo.copy()
newCaret.collapse()
self._set_selection(newCaret,reason=controlTypes.REASON_FOCUS)
if self.passThrough:
obj.event_gainFocus()
else:
speech.speakTextInfo(newInfo,reason=controlTypes.REASON_FOCUS)
else:
# This node doesn't have the focus, so just set focus to it. The gainFocus event will handle the rest.
obj.setFocus()
return True
def script_tab(self, gesture):
if not self._tabOverride("next"):
gesture.send()
def script_shiftTab(self, gesture):
if not self._tabOverride("previous"):
gesture.send()
def event_focusEntered(self,obj,nextHandler):
if obj==self.rootNVDAObject:
self._enteringFromOutside = True
if self.passThrough:
nextHandler()
def _shouldIgnoreFocus(self, obj):
"""Determines whether focus on a given object should be ignored.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if focus on L{obj} should be ignored, C{False} otherwise.
@rtype: bool
"""
return False
def _postGainFocus(self, obj):
"""Executed after a gainFocus within the browseMode document.
This will not be executed if L{event_gainFocus} determined that it should abort and call nextHandler.
@param obj: The object that gained focus.
@type obj: L{NVDAObjects.NVDAObject}
"""
def _replayFocusEnteredEvents(self):
# We blocked the focusEntered events because we were in browse mode,
# but now that we've switched to focus mode, we need to fire them.
for parent in api.getFocusAncestors()[api.getFocusDifferenceLevel():]:
try:
parent.event_focusEntered()
except:
log.exception("Error executing focusEntered event: %s" % parent)
def event_gainFocus(self, obj, nextHandler):
enteringFromOutside=self._enteringFromOutside
self._enteringFromOutside=False
if not self.isReady:
if self.passThrough:
nextHandler()
return
if enteringFromOutside and not self.passThrough and self._lastFocusObj==obj:
# We're entering the document from outside (not returning from an inside object/application; #3145)
# and this was the last non-root node with focus, so ignore this focus event.
# Otherwise, if the user switches away and back to this document, the cursor will jump to this node.
# This is not ideal if the user was positioned over a node which cannot receive focus.
return
if obj==self.rootNVDAObject:
if self.passThrough:
return nextHandler()
return
if not self.passThrough and self._shouldIgnoreFocus(obj):
return
self._lastFocusObj=obj
try:
focusInfo = self.makeTextInfo(obj)
except:
# This object is not in the treeInterceptor, even though it resides beneath the document.
# Automatic pass through should be enabled in certain circumstances where this occurs.
if not self.passThrough and self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS):
self.passThrough=True
reportPassThrough(self)
self._replayFocusEnteredEvents()
return nextHandler()
#We only want to update the caret and speak the field if we're not in the same one as before
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if not self._hadFirstGainFocus or not focusInfo.isOverlapping(caretInfo):
# The virtual caret is not within the focus node.
oldPassThrough=self.passThrough
passThrough=self.shouldPassThrough(obj,reason=controlTypes.REASON_FOCUS)
if not oldPassThrough and (passThrough or sayAllHandler.isRunning()):
# If pass-through is disabled, cancel speech, as a focus change should cause page reading to stop.
# This must be done before auto-pass-through occurs, as we want to stop page reading even if pass-through will be automatically enabled by this focus change.
speech.cancelSpeech()
self.passThrough=passThrough
if not self.passThrough:
# We read the info from the browseMode document instead of the control itself.
speech.speakTextInfo(focusInfo,reason=controlTypes.REASON_FOCUS)
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj,controlTypes.REASON_ONLYCACHE)
else:
if not oldPassThrough:
self._replayFocusEnteredEvents()
nextHandler()
focusInfo.collapse()
self._set_selection(focusInfo,reason=controlTypes.REASON_FOCUS)
else:
# The virtual caret was already at the focused node.
if not self.passThrough:
# This focus change was caused by a virtual caret movement, so don't speak the focused node to avoid double speaking.
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj,controlTypes.REASON_ONLYCACHE)
else:
return nextHandler()
self._postGainFocus(obj)
event_gainFocus.ignoreIsReady=True
def _handleScrollTo(self, obj):
"""Handle scrolling the browseMode document to a given object in response to an event.
Subclasses should call this from an event which indicates that the document has scrolled.
@postcondition: The virtual caret is moved to L{obj} and the buffer content for L{obj} is reported.
@param obj: The object to which the document should scroll.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if the document was scrolled, C{False} if not.
@rtype: bool
@note: If C{False} is returned, calling events should probably call their nextHandler.
"""
if self.programmaticScrollMayFireEvent and self._lastProgrammaticScrollTime and time.time() - self._lastProgrammaticScrollTime < 0.4:
# This event was probably caused by this browseMode document's call to scrollIntoView().
# Therefore, ignore it. Otherwise, the cursor may bounce back to the scroll point.
# However, pretend we handled it, as we don't want it to be passed on to the object either.
return True
try:
scrollInfo = self.makeTextInfo(obj)
except:
return False
#We only want to update the caret and speak the field if we're not in the same one as before
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if not scrollInfo.isOverlapping(caretInfo):
if scrollInfo.isCollapsed:
scrollInfo.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(scrollInfo,reason=controlTypes.REASON_CARET)
scrollInfo.collapse()
self.selection = scrollInfo
return True
return False
APPLICATION_ROLES = (controlTypes.ROLE_APPLICATION, controlTypes.ROLE_DIALOG)
def _isNVDAObjectInApplication(self, obj):
"""Determine whether a given object is within an application.
The object is considered to be within an application if it or one of its ancestors has an application role.
This should only be called on objects beneath the treeInterceptor's root NVDAObject.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if L{obj} is within an application, C{False} otherwise.
@rtype: bool
"""
# We cache the result for each object we walk.
# There can be browse mode documents within other documents and the result might be different between these,
# so the cache must be maintained on the TreeInterceptor rather than the object itself.
try:
cache = self._isInAppCache
except AttributeError:
# Create this lazily, as this method isn't used by all browse mode implementations.
cache = self._isInAppCache = weakref.WeakKeyDictionary()
objs = []
def doResult(result):
# Cache this on descendants we've walked over.
for obj in objs:
cache[obj] = result
return result
while obj and obj != self.rootNVDAObject:
inApp = cache.get(obj)
if inApp is not None:
# We found a cached result.
return doResult(inApp)
objs.append(obj)
if obj.role in self.APPLICATION_ROLES:
return doResult(True)
# Cache container.
container = obj.container
obj.container = container
obj = container
return doResult(False)
def _get_documentConstantIdentifier(self):
"""Get the constant identifier for this document.
This identifier should uniquely identify all instances (not just one instance) of a document for at least the current session of the hosting application.
Generally, the document URL should be used.
@return: The constant identifier for this document, C{None} if there is none.
"""
return None
def _get_shouldRememberCaretPositionAcrossLoads(self):
"""Specifies whether the position of the caret should be remembered when this document is loaded again.
This is useful when the browser remembers the scroll position for the document,
but does not communicate this information via APIs.
The remembered caret position is associated with this document using L{documentConstantIdentifier}.
@return: C{True} if the caret position should be remembered, C{False} if not.
@rtype: bool
"""
docConstId = self.documentConstantIdentifier
# Return True if the URL indicates that this is probably a web browser document.
# We do this check because we don't want to remember caret positions for email messages, etc.
return isinstance(docConstId, basestring) and docConstId.split("://", 1)[0] in ("http", "https", "ftp", "ftps", "file")
def _getInitialCaretPos(self):
"""Retrieve the initial position of the caret after the buffer has been loaded.
This position, if any, will be passed to L{makeTextInfo}.
Subclasses should extend this method.
@return: The initial position of the caret, C{None} if there isn't one.
@rtype: TextInfo position
"""
if self.shouldRememberCaretPositionAcrossLoads:
try:
return self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier]
except KeyError:
pass
return None
def getEnclosingContainerRange(self,range):
range=range.copy()
range.collapse()
try:
item = next(self._iterNodesByType("container", "up", range))
except (NotImplementedError,StopIteration):
return
return item.textInfo
def script_moveToStartOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
# Translators: Reported when the user attempts to move to the start or end of a container (list, table, etc.)
# But there is no container.
ui.message(_("Not in a container"))
return
container.collapse()
self._set_selection(container, reason=REASON_QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS)
script_moveToStartOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET
# Translators: Description for the Move to start of container command in browse mode.
script_moveToStartOfContainer.__doc__=_("Moves to the start of the container element, such as a list or table")
def script_movePastEndOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
ui.message(_("Not in a container"))
return
container.collapse(end=True)
docEnd=container.obj.makeTextInfo(textInfos.POSITION_LAST)
if container.compareEndPoints(docEnd,"endToEnd")>=0:
container=docEnd
# Translators: a message reported when:
# Review cursor is at the bottom line of the current navigator object.
# Landing at the end of a browse mode document when trying to jump to the end of the current container.
ui.message(_("Bottom"))
self._set_selection(container, reason=REASON_QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=controlTypes.REASON_FOCUS)
script_movePastEndOfContainer.resumeSayAllMode=sayAllHandler.CURSOR_CARET
# Translators: Description for the Move past end of container command in browse mode.
script_movePastEndOfContainer.__doc__=_("Moves past the end of the container element, such as a list or table")
NOT_LINK_BLOCK_MIN_LEN = 30
def _isSuitableNotLinkBlock(self,range):
return len(range.text)>=self.NOT_LINK_BLOCK_MIN_LEN
def _iterNotLinkBlock(self, direction="next", pos=None):
links = self._iterNodesByType("link", direction=direction, pos=pos)
# We want to compare each link against the next link.
item1 = next(links)
while True:
item2 = next(links)
# If the distance between the links is small, this is probably just a piece of non-link text within a block of links; e.g. an inactive link of a nav bar.
if direction=="previous":
range=item1.textInfo.copy()
range.collapse()
range.setEndPoint(item2.textInfo,"startToEnd")
else:
range=item2.textInfo.copy()
range.collapse()
range.setEndPoint(item1.textInfo,"startToEnd")
if self._isSuitableNotLinkBlock(range):
yield TextInfoQuickNavItem("notLinkBlock",self,range)
item1=item2
__gestures={
"kb:NVDA+d": "activateLongDesc",
"kb:escape": "disablePassThrough",
"kb:alt+upArrow": "collapseOrExpandControl",
"kb:alt+downArrow": "collapseOrExpandControl",
"kb:tab": "tab",
"kb:shift+tab": "shiftTab",
"kb:shift+,": "moveToStartOfContainer",
"kb:,": "movePastEndOfContainer",
}
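# Hedged, self-contained sketch (not NVDA code) of the memoised ancestor walk
# used by _isNVDAObjectInApplication above: walk up the parent chain, stop as
# soon as a cached answer or an application-like node is found, then write the
# result back onto every node visited so later queries short-circuit.  The
# _Node class and its attributes are illustrative assumptions only.
import weakref
class _Node(object):
    def __init__(self, parent=None, isApplication=False):
        self.parent = parent
        self.isApplication = isApplication
_inAppCache = weakref.WeakKeyDictionary()
def _isInApplication(obj, root):
    walked = []
    result = False
    node = obj
    while node is not None and node is not root:
        if node in _inAppCache:
            # A cached answer covers this node and everything below it.
            result = _inAppCache[node]
            break
        walked.append(node)
        if node.isApplication:
            result = True
            break
        node = node.parent
    for visited in walked:
        _inAppCache[visited] = result
    return result
# usage sketch:
#   root = _Node()
#   dialog = _Node(parent=root, isApplication=True)
#   field = _Node(parent=dialog)
#   _isInApplication(field, root)  # True; field and dialog are now cached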
| 43.931126
| 564
| 0.742855
|
0f95ee380a480356d455daf3e8a42549a213b639
| 414
|
py
|
Python
|
mera/closures_and_decorators/decorator_for_unicode.py
|
MikeLaptev/sandbox_python
|
90d9b520d24602fa298abed4bb85232e12550fb2
|
[
"Apache-2.0"
] | 1
|
2016-02-25T19:01:01.000Z
|
2016-02-25T19:01:01.000Z
|
mera/closures_and_decorators/decorator_for_unicode.py
|
MikeLaptev/sandbox_python
|
90d9b520d24602fa298abed4bb85232e12550fb2
|
[
"Apache-2.0"
] | null | null | null |
mera/closures_and_decorators/decorator_for_unicode.py
|
MikeLaptev/sandbox_python
|
90d9b520d24602fa298abed4bb85232e12550fb2
|
[
"Apache-2.0"
] | null | null | null |
# coding=UTF-8
'''
Created on Aug 5, 2015
@author: mlaptev
'''
def escape_unicode(fn):
def wrapped():
print "String to convert '{}'".format(fn())
return "".join([i if ord(i) < 128 else "\\" + str(hex(ord(i))) for i in fn()])
return wrapped
@escape_unicode
def some_non_latin_string():
return "This is just a строка!"
if __name__ == '__main__':
print some_non_latin_string()
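# The module above is written for Python 2 (print statements, byte strings).  A
# hedged sketch of the same closure/decorator idea on Python 3, where str is
# already unicode, might look like this; the names are illustrative only.
def escape_unicode_py3(fn):
    def wrapped(*args, **kwargs):
        text = fn(*args, **kwargs)
        # keep ASCII characters, replace everything else with "\" + its hex code
        return "".join(c if ord(c) < 128 else "\\" + hex(ord(c)) for c in text)
    return wrapped
@escape_unicode_py3
def some_non_latin_string_py3():
    return "This is just a строка!"
# e.g. some_non_latin_string_py3() == 'This is just a \\0x441\\0x442\\0x440\\0x43e\\0x43a\\0x430!'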
| 20.7
| 86
| 0.620773
|
533311269171602111c5d8abc6f0187bcb42c249
| 11,620
|
py
|
Python
|
pykg2vec/data/generator.py
|
rpatil524/pykg2vec
|
492807b627574f95b0db9e7cb9f090c3c45a030a
|
[
"MIT"
] | 430
|
2019-04-17T19:04:25.000Z
|
2022-03-31T12:20:18.000Z
|
pykg2vec/data/generator.py
|
Sujit-O/pyKG2Vec
|
492807b627574f95b0db9e7cb9f090c3c45a030a
|
[
"MIT"
] | 102
|
2019-05-11T04:29:57.000Z
|
2022-02-16T12:56:28.000Z
|
pykg2vec/data/generator.py
|
Sujit-O/pyKG2Vec
|
492807b627574f95b0db9e7cb9f090c3c45a030a
|
[
"MIT"
] | 102
|
2019-06-11T08:40:38.000Z
|
2022-03-27T09:36:13.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for generating the batch data for training and testing.
"""
import torch
import numpy as np
from multiprocessing import Process, Queue
from pykg2vec.common import TrainingStrategy
def raw_data_generator(command_queue, raw_queue, config):
"""Function to feed triples to raw queue for multiprocessing.
Args:
        command_queue (Queue) : Each enqueued item is either a command or the number of batches to generate.
raw_queue (Queue) : Multiprocessing Queue to put the raw data to be processed.
config (pykg2vec.Config) : Consists of the necessary parameters for training configuration.
"""
data = config.knowledge_graph.read_cache_data('triplets_train')
number_of_batch = len(data) // config.batch_size
random_ids = np.random.permutation(len(data))
while True:
command = command_queue.get()
if command != "quit":
number_of_batch = command
for batch_idx in range(number_of_batch):
pos_start = config.batch_size * batch_idx
pos_end = config.batch_size * (batch_idx + 1)
raw_data = np.asarray([[data[x].h, data[x].r, data[x].t] for x in random_ids[pos_start:pos_end]])
raw_queue.put((batch_idx, raw_data))
else:
raw_queue.put(None)
raw_queue.put(None)
return
def process_function_pairwise(raw_queue, processed_queue, config):
"""Function that puts the processed data in the queue.
Args:
raw_queue (Queue) : Multiprocessing Queue to put the raw data to be processed.
processed_queue (Queue) : Multiprocessing Queue to put the processed data.
config (pykg2vec.Config) : Consists of the necessary parameters for training configuration.
"""
data = config.knowledge_graph.read_cache_data('triplets_train')
relation_property = config.knowledge_graph.read_cache_data('relationproperty')
positive_triplets = {(t.h, t.r, t.t): 1 for t in data}
neg_rate = config.neg_rate
del data # save memory space
while True:
item = raw_queue.get()
if item is None:
return
_, pos_triples = item
ph = pos_triples[:, 0]
pr = pos_triples[:, 1]
pt = pos_triples[:, 2]
nh = []
nr = []
nt = []
for t in pos_triples:
prob = relation_property[t[1]] if config.sampling == "bern" else 0.5
for _ in range(neg_rate):
if np.random.random() > prob:
idx_replace_tail = np.random.randint(config.tot_entity)
while (t[0], t[1], idx_replace_tail) in positive_triplets:
idx_replace_tail = np.random.randint(config.tot_entity)
nh.append(t[0])
nr.append(t[1])
nt.append(idx_replace_tail)
else:
idx_replace_head = np.random.randint(config.tot_entity)
while (idx_replace_head, t[1], t[2]) in positive_triplets:
idx_replace_head = np.random.randint(config.tot_entity)
nh.append(idx_replace_head)
nr.append(t[1])
nt.append(t[2])
processed_queue.put([ph, pr, pt, nh, nr, nt])
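# Hedged illustration (not part of pykg2vec): in the sampling loop above (and in
# the pointwise variant below) a draw greater than `prob` replaces the tail and
# any other draw replaces the head, so `prob` is the probability of corrupting
# the head.  With "bern" sampling `prob` comes from the cached per-relation
# properties, otherwise both sides are equally likely (0.5).
def _corrupt_side_example(prob, rng=np.random):
    # mirrors the `np.random.random() > prob` branch used for negative sampling
    return "tail" if rng.random() > prob else "head"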
def process_function_pointwise(raw_queue, processed_queue, config):
"""Function that puts the processed data in the queue.
Args:
raw_queue (Queue) : Multiprocessing Queue to put the raw data to be processed.
processed_queue (Queue) : Multiprocessing Queue to put the processed data.
config (pykg2vec.Config) : Consists of the necessary parameters for training configuration.
"""
data = config.knowledge_graph.read_cache_data('triplets_train')
relation_property = config.knowledge_graph.read_cache_data('relationproperty')
positive_triplets = {(t.h, t.r, t.t): 1 for t in data}
neg_rate = config.neg_rate
del data # save memory space
while True:
item = raw_queue.get()
if item is None:
return
_, pos_triples = item
point_h = []
point_r = []
point_t = []
point_y = []
for t in pos_triples:
            # positive sample
point_h.append(t[0])
point_r.append(t[1])
point_t.append(t[2])
point_y.append(1)
prob = relation_property[t[1]] if config.sampling == "bern" else 0.5
for _ in range(neg_rate):
if np.random.random() > prob:
idx_replace_tail = np.random.randint(config.tot_entity)
while (t[0], t[1], idx_replace_tail) in positive_triplets:
idx_replace_tail = np.random.randint(config.tot_entity)
point_h.append(t[0])
point_r.append(t[1])
point_t.append(idx_replace_tail)
point_y.append(-1)
else:
idx_replace_head = np.random.randint(config.tot_entity)
while (idx_replace_head, t[1], t[2]) in positive_triplets:
idx_replace_head = np.random.randint(config.tot_entity)
point_h.append(idx_replace_head)
point_r.append(t[1])
point_t.append(t[2])
point_y.append(-1)
processed_queue.put([point_h, point_r, point_t, point_y])
def process_function_multiclass(raw_queue, processed_queue, config):
"""Function that puts the processed data in the queue.
Args:
raw_queue (Queue) : Multiprocessing Queue to put the raw data to be processed.
processed_queue (Queue) : Multiprocessing Queue to put the processed data.
config (pykg2vec.Config) : Consists of the necessary parameters for training configuration.
"""
def _to_sparse_i(indices):
x = []
y = []
for index in indices:
x.append(index[0])
y.append(index[1])
return [x, y]
hr_t_train = config.knowledge_graph.read_cache_data('hr_t_train')
tr_h_train = config.knowledge_graph.read_cache_data('tr_h_train')
neg_rate = config.neg_rate
shape = [config.batch_size, config.tot_entity]
while True:
item = raw_queue.get()
if item is None:
return
idx, raw_data = item
h = raw_data[:, 0]
r = raw_data[:, 1]
t = raw_data[:, 2]
indices_hr_t = []
indices_tr_h = []
neg_indices_hr_t = []
neg_indices_tr_h = []
random_ids = np.random.permutation(config.tot_entity)
for i in range(config.batch_size):
hr_t = hr_t_train[(h[i], r[i])]
tr_h = tr_h_train[(t[i], r[i])]
for idx in hr_t:
indices_hr_t.append([i, idx])
for idx in tr_h:
indices_tr_h.append([i, idx])
if neg_rate > 0:
for idx in random_ids[0:100]:
if idx not in hr_t:
neg_indices_hr_t.append([i, idx])
for idx in random_ids[0:100]:
if idx not in tr_h:
neg_indices_tr_h.append([i, idx])
values_hr_t = torch.FloatTensor([1]).repeat([len(indices_hr_t)])
values_tr_h = torch.FloatTensor([1]).repeat([len(indices_tr_h)])
if neg_rate > 0:
neg_values_hr_t = torch.FloatTensor([-1]).repeat([len(neg_indices_hr_t)])
neg_values_tr_h = torch.FloatTensor([-1]).repeat([len(neg_indices_tr_h)])
        # It looks like Torch sparse tensors do not work in multiprocessing,
        # so they need to be converted to dense, which is not memory efficient
# https://github.com/pytorch/pytorch/pull/27062
# https://github.com/pytorch/pytorch/issues/20248
hr_t = torch.sparse.LongTensor(torch.LongTensor(_to_sparse_i(indices_hr_t)), values_hr_t, torch.Size(shape)).to_dense()
tr_h = torch.sparse.LongTensor(torch.LongTensor(_to_sparse_i(indices_tr_h)), values_tr_h, torch.Size(shape)).to_dense()
if neg_rate > 0:
neg_hr_t = torch.sparse.LongTensor(torch.LongTensor(_to_sparse_i(neg_indices_hr_t)), neg_values_hr_t, torch.Size(shape)).to_dense()
neg_tr_h = torch.sparse.LongTensor(torch.LongTensor(_to_sparse_i(neg_indices_tr_h)), neg_values_tr_h, torch.Size(shape)).to_dense()
hr_t = hr_t.add(neg_hr_t)
tr_h = tr_h.add(neg_tr_h)
processed_queue.put([h, r, t, hr_t, tr_h])
class Generator:
"""Generator class for the embedding algorithms
Args:
        model (object): KGE model object for which the batches are generated.
        config (object): generator configuration object.
Yields:
matrix : Batch size of processed triples
Examples:
        >>> from pykg2vec.data.generator import Generator
        >>> from pykg2vec.models.TransE import TransE
        >>> model = TransE()
        >>> gen_train = Generator(model, model.config)
"""
def __init__(self, model, config):
self.model = model
self.config = config
self.training_strategy = model.training_strategy
self.process_list = []
self.raw_queue_size = 10
self.processed_queue_size = 10
self.command_queue = Queue(self.raw_queue_size)
self.raw_queue = Queue(self.raw_queue_size)
self.processed_queue = Queue(self.processed_queue_size)
self.create_feeder_process()
self.create_train_processor_process()
def __iter__(self):
return self
def __next__(self):
return self.processed_queue.get()
def stop(self):
"""Function to stop all the worker process."""
self.command_queue.put("quit")
for worker_process in self.process_list:
while True:
worker_process.join(1)
if not worker_process.is_alive():
break
def create_feeder_process(self):
"""Function create the feeder process."""
feeder_worker = Process(target=raw_data_generator, args=(self.command_queue, self.raw_queue, self.config))
self.process_list.append(feeder_worker)
feeder_worker.daemon = True
feeder_worker.start()
def create_train_processor_process(self):
"""Function ro create the process for generating training samples."""
for _ in range(self.config.num_process_gen):
if self.training_strategy == TrainingStrategy.PROJECTION_BASED:
process_worker = Process(target=process_function_multiclass, args=(self.raw_queue, self.processed_queue, self.config))
elif self.training_strategy == TrainingStrategy.PAIRWISE_BASED:
process_worker = Process(target=process_function_pairwise, args=(self.raw_queue, self.processed_queue, self.config))
elif self.training_strategy == TrainingStrategy.POINTWISE_BASED:
process_worker = Process(target=process_function_pointwise, args=(self.raw_queue, self.processed_queue, self.config))
else:
raise NotImplementedError("This strategy is not supported.")
self.process_list.append(process_worker)
process_worker.daemon = True
process_worker.start()
def start_one_epoch(self, num_batch):
self.command_queue.put(num_batch)
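# Hedged usage sketch (not part of pykg2vec): a trainer is expected to request an
# epoch's worth of batches with start_one_epoch(), drain them through the
# iterator protocol, and call stop() once training is done.  `consume_batch` is
# an illustrative placeholder for the model/optimizer step.
def _run_one_epoch_example(generator, num_batch, consume_batch):
    generator.start_one_epoch(num_batch)
    for _ in range(num_batch):
        batch = next(generator)  # layout of `batch` depends on the training strategy
        consume_batch(batch)
    # generator.stop() should be called once, after the final epoch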
| 36.772152
| 143
| 0.612048
|
26241c6f91b25078f81333b3ca873fd7c292b2d5
| 1,157
|
py
|
Python
|
posts/migrations/0004_auto_20200722_1214.py
|
qoukka/hw05_final
|
f234514034c513e37035674fcd6b629f873b4b6b
|
[
"MIT"
] | null | null | null |
posts/migrations/0004_auto_20200722_1214.py
|
qoukka/hw05_final
|
f234514034c513e37035674fcd6b629f873b4b6b
|
[
"MIT"
] | 12
|
2021-03-19T13:16:24.000Z
|
2022-03-12T00:52:03.000Z
|
posts/migrations/0004_auto_20200722_1214.py
|
qoukka/hw05_final
|
f234514034c513e37035674fcd6b629f873b4b6b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.5 on 2020-07-22 09:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0003_auto_20200718_1452'),
]
operations = [
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='posts/'),
),
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author_posts', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='group_posts', to='posts.Group'),
),
migrations.AlterField(
model_name='post',
name='pub_date',
field=models.DateTimeField(auto_now_add=True, verbose_name='Дата публикации'),
),
]
| 32.138889
| 150
| 0.62057
|
70fb57751e5b3ac0b83d51f9695c98df7882591a
| 469
|
py
|
Python
|
examples/tests/test_paired_assoc.py
|
kat-dearstyne/think
|
1f6de81b694e05996948639b7f7ce14b6dd4ecae
|
[
"MIT"
] | 2
|
2018-04-18T11:14:19.000Z
|
2020-07-23T16:25:41.000Z
|
examples/tests/test_paired_assoc.py
|
kat-dearstyne/think
|
1f6de81b694e05996948639b7f7ce14b6dd4ecae
|
[
"MIT"
] | 1
|
2019-02-08T15:11:50.000Z
|
2019-04-30T10:14:46.000Z
|
examples/tests/test_paired_assoc.py
|
kat-dearstyne/think
|
1f6de81b694e05996948639b7f7ce14b6dd4ecae
|
[
"MIT"
] | 1
|
2020-12-20T17:43:40.000Z
|
2020-12-20T17:43:40.000Z
|
import unittest
from examples.paired_assoc import PairedAssociatesSimulation
class PairedAssociatesTest(unittest.TestCase):
def test_paired_associates(self, output=False):
sim = PairedAssociatesSimulation(n_sims=2)
result_correct, result_rt = sim.run()
self.assertGreater(result_correct.r, .80)
self.assertGreater(result_rt.r, .80)
self.assertLess(result_correct.nrmse, .20)
self.assertLess(result_rt.nrmse, .20)
| 31.266667
| 60
| 0.733475
|
996caa9c2cd403e9511c0d159d0cefe7f6fd1b3a
| 14,892
|
py
|
Python
|
salt/utils/dictdiffer.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 19
|
2016-01-29T14:37:52.000Z
|
2022-03-30T18:08:01.000Z
|
salt/utils/dictdiffer.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 223
|
2016-03-02T16:39:41.000Z
|
2022-03-03T12:26:35.000Z
|
salt/utils/dictdiffer.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 64
|
2016-02-04T19:45:26.000Z
|
2021-12-15T02:02:31.000Z
|
# -*- coding: utf-8 -*-
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
Originally posted at http://stackoverflow.com/questions/1165352/fast-comparison-between-two-python-dictionary/1165552#1165552
Available at repository: https://github.com/hughdbrown/dictdiffer
Added the ability to recursively compare dictionaries
"""
from __future__ import absolute_import, print_function, unicode_literals
import copy
from collections.abc import Mapping
from salt.ext import six
def diff(current_dict, past_dict):
return DictDiffer(current_dict, past_dict)
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(list(current_dict)), set(list(past_dict))
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(
o for o in self.intersect if self.past_dict[o] != self.current_dict[o]
)
def unchanged(self):
return set(
o for o in self.intersect if self.past_dict[o] == self.current_dict[o]
)
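# Hedged usage sketch (not part of the salt API): the helper below only
# illustrates the four views DictDiffer exposes, using throwaway dictionaries.
def _dict_differ_usage_example():
    d = diff({"a": 1, "b": 2, "d": 4}, {"a": 1, "b": 3, "c": 9})
    assert d.added() == {"d"}      # keys only in the current dict
    assert d.removed() == {"c"}    # keys only in the past dict
    assert d.changed() == {"b"}    # keys in both whose values differ
    assert d.unchanged() == {"a"}  # keys in both with equal values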
def deep_diff(old, new, ignore=None):
ignore = ignore or []
res = {}
old = copy.deepcopy(old)
new = copy.deepcopy(new)
stack = [(old, new, False)]
while stack:
tmps = []
tmp_old, tmp_new, reentrant = stack.pop()
for key in set(list(tmp_old) + list(tmp_new)):
if key in tmp_old and key in tmp_new and tmp_old[key] == tmp_new[key]:
del tmp_old[key]
del tmp_new[key]
continue
if not reentrant:
if key in tmp_old and key in ignore:
del tmp_old[key]
if key in tmp_new and key in ignore:
del tmp_new[key]
if isinstance(tmp_old.get(key), Mapping) and isinstance(
tmp_new.get(key), Mapping
):
tmps.append((tmp_old[key], tmp_new[key], False))
if tmps:
stack.extend([(tmp_old, tmp_new, True)] + tmps)
if old:
res["old"] = old
if new:
res["new"] = new
return res
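# Hedged usage sketch (not part of the salt API): deep_diff() drops keys that are
# equal in both mappings (recursing into nested dicts) and reports whatever is
# left under 'old' and 'new'.  The data below is illustrative only.
def _deep_diff_usage_example():
    old = {"a": 1, "nested": {"x": 1, "y": 2}}
    new = {"a": 1, "nested": {"x": 1, "y": 3}, "b": 5}
    assert deep_diff(old, new) == {
        "old": {"nested": {"y": 2}},
        "new": {"nested": {"y": 3}, "b": 5},
    }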
def recursive_diff(past_dict, current_dict, ignore_missing_keys=True):
"""
Returns a RecursiveDictDiffer object that computes the recursive diffs
between two dictionaries
past_dict
Past dictionary
current_dict
Current dictionary
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
Default is True.
"""
return RecursiveDictDiffer(past_dict, current_dict, ignore_missing_keys)
class RecursiveDictDiffer(DictDiffer):
"""
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
    {'common_key' : {'new': new_value, 'old': old_value}}
    The class overrides all DictDiffer methods, returning lists of keys and
    subkeys using the . notation (i.e. 'common_key1.common_key2.changed_key')
The class provides access to:
    (1) the added, removed, changed keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format above (diff property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
"""
NONE_VALUE = "<_null_>"
def __init__(self, past_dict, current_dict, ignore_missing_keys):
"""
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
"""
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = self._get_diffs(
self.current_dict, self.past_dict, ignore_missing_keys
)
        # Ignores unset values when assessing the changes
self.ignore_unset_values = True
@classmethod
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
"""
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
"""
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {"new": dict1[p], "old": cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(
dict1[p], dict2[p], ignore_missing_keys
)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {"new": dict1[p], "old": dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {"new": cls.NONE_VALUE, "old": dict2[p]}})
return ret_dict
@classmethod
def _get_values(cls, diff_dict, type="new"):
"""
        Returns a dictionary with the 'new' or 'old' values in a diff dict.
type
Which values to return, 'new' or 'old'
"""
ret_dict = {}
for p in diff_dict.keys():
if type in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p][type]})
else:
ret_dict.update({p: cls._get_values(diff_dict[p], type=type)})
return ret_dict
@classmethod
def _get_changes(cls, diff_dict):
"""
        Returns a list of string messages with the differences in a diff dict.
        Each inner difference is indented two spaces deeper
"""
changes_strings = []
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ["new", "old"]:
# Some string formatting
old_value = diff_dict[p]["old"]
if diff_dict[p]["old"] == cls.NONE_VALUE:
old_value = "nothing"
elif isinstance(diff_dict[p]["old"], six.string_types):
old_value = "'{0}'".format(diff_dict[p]["old"])
elif isinstance(diff_dict[p]["old"], list):
old_value = "'{0}'".format(", ".join(diff_dict[p]["old"]))
new_value = diff_dict[p]["new"]
if diff_dict[p]["new"] == cls.NONE_VALUE:
new_value = "nothing"
elif isinstance(diff_dict[p]["new"], six.string_types):
new_value = "'{0}'".format(diff_dict[p]["new"])
elif isinstance(diff_dict[p]["new"], list):
new_value = "'{0}'".format(", ".join(diff_dict[p]["new"]))
changes_strings.append(
"{0} from {1} to {2}".format(p, old_value, new_value)
)
else:
sub_changes = cls._get_changes(diff_dict[p])
if sub_changes:
changes_strings.append("{0}:".format(p))
changes_strings.extend([" {0}".format(c) for c in sub_changes])
return changes_strings
def added(self):
"""
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
"""
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and "old" not in diffs[key]:
keys.extend(
_added(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
elif diffs[key]["old"] == self.NONE_VALUE:
if isinstance(diffs[key]["new"], dict):
keys.extend(
_added(
diffs[key]["new"], prefix="{0}{1}.".format(prefix, key)
)
)
else:
keys.append("{0}{1}".format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=""))
def removed(self):
"""
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
"""
def _removed(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and "old" not in diffs[key]:
keys.extend(
_removed(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
elif diffs[key]["new"] == self.NONE_VALUE:
keys.append("{0}{1}".format(prefix, key))
elif isinstance(diffs[key]["new"], dict):
keys.extend(
_removed(
diffs[key]["new"], prefix="{0}{1}.".format(prefix, key)
)
)
return keys
return sorted(_removed(self._diffs, prefix=""))
def changed(self):
"""
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
"""
def _changed(diffs, prefix):
keys = []
for key in diffs.keys():
if not isinstance(diffs[key], dict):
continue
if isinstance(diffs[key], dict) and "old" not in diffs[key]:
keys.extend(
_changed(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
continue
if self.ignore_unset_values:
if (
"old" in diffs[key]
and "new" in diffs[key]
and diffs[key]["old"] != self.NONE_VALUE
and diffs[key]["new"] != self.NONE_VALUE
):
if isinstance(diffs[key]["new"], dict):
keys.extend(
_changed(
diffs[key]["new"],
prefix="{0}{1}.".format(prefix, key),
)
)
else:
keys.append("{0}{1}".format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
else:
if "old" in diffs[key] and "new" in diffs[key]:
if isinstance(diffs[key]["new"], dict):
keys.extend(
_changed(
diffs[key]["new"],
prefix="{0}{1}.".format(prefix, key),
)
)
else:
keys.append("{0}{1}".format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key], prefix="{0}{1}.".format(prefix, key))
)
return keys
return sorted(_changed(self._diffs, prefix=""))
def unchanged(self):
"""
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
"""
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append("{0}{1}".format(prefix, key))
elif isinstance(current_dict[key], dict):
if "new" in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(
current_dict[key],
diffs[key],
prefix="{0}{1}.".format(prefix, key),
)
)
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=""))
@property
def diffs(self):
"""Returns a dict with the recursive diffs current_dict - past_dict"""
return self._diffs
@property
def new_values(self):
"""Returns a dictionary with the new values"""
return self._get_values(self._diffs, type="new")
@property
def old_values(self):
"""Returns a dictionary with the old values"""
return self._get_values(self._diffs, type="old")
@property
def changes_str(self):
"""Returns a string describing the changes"""
return "\n".join(self._get_changes(self._diffs))
| 35.798077
| 127
| 0.517862
|
906ef2feaaa20838c79a2b7e1d53eb7e74be7d69
| 6,400
|
py
|
Python
|
BBB-firmware/u-boot-v2018.05-rc2/test/py/tests/test_efi_loader.py
|
guileschool/BEAGLEBONE-tutorials
|
eecd83e0c14941b05ad38eeb77e5a50602cc29ca
|
[
"MIT"
] | 4
|
2018-09-28T04:33:26.000Z
|
2021-03-10T06:29:55.000Z
|
BBB-firmware/u-boot-v2018.05-rc2/test/py/tests/test_efi_loader.py
|
guileschool/BEAGLEBONE-tutorials
|
eecd83e0c14941b05ad38eeb77e5a50602cc29ca
|
[
"MIT"
] | 4
|
2016-08-30T11:30:25.000Z
|
2020-12-27T09:58:07.000Z
|
BBB-firmware/u-boot-v2018.05-rc2/test/py/tests/test_efi_loader.py
|
guileschool/BEAGLEBONE-tutorials
|
eecd83e0c14941b05ad38eeb77e5a50602cc29ca
|
[
"MIT"
] | 2
|
2016-12-30T08:02:57.000Z
|
2020-05-16T05:59:30.000Z
|
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2016, Alexander Graf <agraf@suse.de>
#
# based on test_net.py.
#
# SPDX-License-Identifier: GPL-2.0
# Test efi loader implementation
import pytest
import u_boot_utils
"""
Note: This test relies on boardenv_* containing configuration values to define
which network environment is available for testing. Without this, the parts
that rely on network will be automatically skipped.
For example:
# Boolean indicating whether the Ethernet device is attached to USB, and hence
# USB enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_usb = False
# Boolean indicating whether the Ethernet device is attached to PCI, and hence
# PCI enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_pci = True
# True if a DHCP server is attached to the network, and should be tested.
# If DHCP testing is not possible or desired, this variable may be omitted or
# set to False.
env__net_dhcp_server = True
# A list of environment variables that should be set in order to configure a
# static IP. If solely relying on DHCP, this variable may be omitted or set to
# an empty list.
env__net_static_env_vars = [
("ipaddr", "10.0.0.100"),
("netmask", "255.255.255.0"),
("serverip", "10.0.0.1"),
]
# Details regarding a file that may be read from a TFTP server. This variable
# may be omitted or set to None if TFTP testing is not possible or desired.
env__efi_loader_helloworld_file = {
"fn": "lib/efi_loader/helloworld.efi",
"size": 5058624,
"crc32": "c2244b26",
}
"""
net_set_up = False
def test_efi_pre_commands(u_boot_console):
"""Execute any commands required to enable network hardware.
These commands are provided by the boardenv_* file; see the comment at the
beginning of this file.
"""
init_usb = u_boot_console.config.env.get('env__net_uses_usb', False)
if init_usb:
u_boot_console.run_command('usb start')
init_pci = u_boot_console.config.env.get('env__net_uses_pci', False)
if init_pci:
u_boot_console.run_command('pci enum')
@pytest.mark.buildconfigspec('cmd_dhcp')
def test_efi_dhcp(u_boot_console):
"""Test the dhcp command.
The boardenv_* file may be used to enable/disable this test; see the
comment at the beginning of this file.
"""
test_dhcp = u_boot_console.config.env.get('env__net_dhcp_server', False)
if not test_dhcp:
pytest.skip('No DHCP server available')
u_boot_console.run_command('setenv autoload no')
output = u_boot_console.run_command('dhcp')
assert 'DHCP client bound to address ' in output
global net_set_up
net_set_up = True
@pytest.mark.buildconfigspec('net')
def test_efi_setup_static(u_boot_console):
"""Set up a static IP configuration.
The configuration is provided by the boardenv_* file; see the comment at
the beginning of this file.
"""
env_vars = u_boot_console.config.env.get('env__net_static_env_vars', None)
if not env_vars:
pytest.skip('No static network configuration is defined')
for (var, val) in env_vars:
u_boot_console.run_command('setenv %s %s' % (var, val))
global net_set_up
net_set_up = True
def fetch_tftp_file(u_boot_console, env_conf):
"""Grab an env described file via TFTP and return its address
A file as described by an env config <env_conf> is downloaded from the TFTP
server. The address to that file is returned.
"""
if not net_set_up:
pytest.skip('Network not initialized')
f = u_boot_console.config.env.get(env_conf, None)
if not f:
pytest.skip('No %s binary specified in environment' % env_conf)
addr = f.get('addr', None)
if not addr:
addr = u_boot_utils.find_ram_base(u_boot_console) + (1024 * 1024 * 4)
fn = f['fn']
output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn))
expected_text = 'Bytes transferred = '
sz = f.get('size', None)
if sz:
expected_text += '%d' % sz
assert expected_text in output
expected_crc = f.get('crc32', None)
if not expected_crc:
return addr
if u_boot_console.config.buildconfig.get('config_cmd_crc32', 'n') != 'y':
return addr
output = u_boot_console.run_command('crc32 %x $filesize' % addr)
assert expected_crc in output
return addr
@pytest.mark.buildconfigspec('cmd_bootefi_hello_compile')
def test_efi_helloworld_net(u_boot_console):
"""Run the helloworld.efi binary via TFTP.
The helloworld.efi file is downloaded from the TFTP server and gets
executed.
"""
addr = fetch_tftp_file(u_boot_console, 'env__efi_loader_helloworld_file')
output = u_boot_console.run_command('bootefi %x' % addr)
expected_text = 'Hello, world'
assert expected_text in output
expected_text = '## Application terminated, r = 0'
assert expected_text in output
@pytest.mark.buildconfigspec('cmd_bootefi_hello')
def test_efi_helloworld_builtin(u_boot_console):
"""Run the builtin helloworld.efi binary.
    The helloworld.efi file is included in U-Boot; execute it using the
    special "bootefi hello" command.
"""
output = u_boot_console.run_command('bootefi hello')
expected_text = 'Hello, world'
assert expected_text in output
@pytest.mark.buildconfigspec('cmd_bootefi')
def test_efi_grub_net(u_boot_console):
"""Run the grub.efi binary via TFTP.
The grub.efi file is downloaded from the TFTP server and gets
executed.
"""
addr = fetch_tftp_file(u_boot_console, 'env__efi_loader_grub_file')
u_boot_console.run_command('bootefi %x' % addr, wait_for_prompt=False)
# Verify that we have an SMBIOS table
check_smbios = u_boot_console.config.env.get('env__efi_loader_check_smbios', False)
if check_smbios:
u_boot_console.wait_for('grub>')
output = u_boot_console.run_command('lsefisystab', wait_for_prompt=False, wait_for_echo=False)
u_boot_console.wait_for('SMBIOS')
# Then exit cleanly
u_boot_console.wait_for('grub>')
output = u_boot_console.run_command('exit', wait_for_prompt=False, wait_for_echo=False)
u_boot_console.wait_for('r = 0')
# And give us our U-Boot prompt back
u_boot_console.run_command('')
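# Hedged sketch (illustrative values only, not required by U-Boot): the grub
# test above additionally reads two boardenv_* values that the sample
# configuration at the top of this file does not show.  They follow the same
# shape as env__efi_loader_helloworld_file:
#
#   env__efi_loader_grub_file = {
#       "fn": "grub_arm.efi",   # file to fetch from the TFTP server
#       "size": 490496,         # optional, compared against the transfer size
#       "crc32": "deadbeef",    # optional, checked only when CONFIG_CMD_CRC32=y
#   }
#
#   # Set True to also verify that an SMBIOS table is listed by lsefisystab.
#   env__efi_loader_check_smbios = False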
| 32.323232
| 102
| 0.71875
|
57f70e1c401e87c2f443245b450b905b8294b004
| 1,976
|
py
|
Python
|
07_prot/solution1_for.py
|
ilaydabozan/biofx_python
|
b7bef85dcf0b0a9e049f10a0766b9da20bf676c7
|
[
"MIT"
] | 74
|
2020-12-18T16:04:31.000Z
|
2022-03-02T09:05:54.000Z
|
07_prot/solution1_for.py
|
ilaydabozan/biofx_python
|
b7bef85dcf0b0a9e049f10a0766b9da20bf676c7
|
[
"MIT"
] | 6
|
2021-06-30T19:42:04.000Z
|
2022-02-07T04:45:31.000Z
|
07_prot/solution1_for.py
|
ilaydabozan/biofx_python
|
b7bef85dcf0b0a9e049f10a0766b9da20bf676c7
|
[
"MIT"
] | 169
|
2020-11-06T19:44:36.000Z
|
2022-03-30T08:38:42.000Z
|
#!/usr/bin/env python3
""" Translate DNA/RNA to proteins """
import argparse
from typing import NamedTuple
class Args(NamedTuple):
""" Command-line arguments """
rna: str
# --------------------------------------------------
def get_args() -> Args:
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Translate RNA to proteins',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('rna', type=str, metavar='RNA', help='RNA sequence')
args = parser.parse_args()
return Args(args.rna)
# --------------------------------------------------
def main() -> None:
"""Make a jazz noise here"""
args = get_args()
rna = args.rna.upper()
codon_to_aa = {
'AAA': 'K', 'AAC': 'N', 'AAG': 'K', 'AAU': 'N', 'ACA': 'T',
'ACC': 'T', 'ACG': 'T', 'ACU': 'T', 'AGA': 'R', 'AGC': 'S',
'AGG': 'R', 'AGU': 'S', 'AUA': 'I', 'AUC': 'I', 'AUG': 'M',
'AUU': 'I', 'CAA': 'Q', 'CAC': 'H', 'CAG': 'Q', 'CAU': 'H',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCU': 'P', 'CGA': 'R',
'CGC': 'R', 'CGG': 'R', 'CGU': 'R', 'CUA': 'L', 'CUC': 'L',
'CUG': 'L', 'CUU': 'L', 'GAA': 'E', 'GAC': 'D', 'GAG': 'E',
'GAU': 'D', 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCU': 'A',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGU': 'G', 'GUA': 'V',
'GUC': 'V', 'GUG': 'V', 'GUU': 'V', 'UAC': 'Y', 'UAU': 'Y',
'UCA': 'S', 'UCC': 'S', 'UCG': 'S', 'UCU': 'S', 'UGC': 'C',
'UGG': 'W', 'UGU': 'C', 'UUA': 'L', 'UUC': 'F', 'UUG': 'L',
'UUU': 'F', 'UAA': '*', 'UAG': '*', 'UGA': '*',
}
# Method 1: for loop
k = 3
protein = ''
for codon in [rna[i:i + k] for i in range(0, len(rna), k)]:
aa = codon_to_aa.get(codon, '-')
if aa == '*':
break
protein += aa
print(protein)
# --------------------------------------------------
if __name__ == '__main__':
main()
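# Hedged usage sketch (not part of the original solution): with the table above,
# AUG -> M, GCC -> A and UGA is a stop codon, so translation halts after two
# residues.  The sequence is illustrative only.
#
#   $ ./solution1_for.py AUGGCCUGA
#   MA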
| 30.4
| 76
| 0.413968
|
0585a657171a9ab5de159cf5807d1f4088c02e64
| 1,118
|
py
|
Python
|
packages/merlin/assets/File.py
|
pyre/pyre
|
0f903836f52450bf81216c5dfdfdfebb16090177
|
[
"BSD-3-Clause"
] | 25
|
2018-04-23T01:45:39.000Z
|
2021-12-10T06:01:23.000Z
|
packages/merlin/assets/File.py
|
pyre/pyre
|
0f903836f52450bf81216c5dfdfdfebb16090177
|
[
"BSD-3-Clause"
] | 53
|
2018-05-31T04:55:00.000Z
|
2021-10-07T21:41:32.000Z
|
packages/merlin/assets/File.py
|
pyre/pyre
|
0f903836f52450bf81216c5dfdfdfebb16090177
|
[
"BSD-3-Clause"
] | 12
|
2018-04-23T22:50:40.000Z
|
2022-02-20T17:27:23.000Z
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <nichael.aivazis@para-sim.com>
# (c) 1998-2021 all rights reserved
# support
import merlin
# superclass
from .RealAsset import RealAsset
# class declaration
class File(RealAsset, family="merlin.assets.files.file", implements=merlin.protocols.file):
"""
Encapsulation of a file based project asset
"""
# required configurable state
category = merlin.protocols.assetCategory()
category.doc = "a clue about the type of this asset"
language = merlin.protocols.language()
language.doc = "a clue about the toolchain that processes this asset"
# hooks
def identify(self, visitor, **kwds):
"""
Ask {visitor} to process a file based asset
"""
# attempt to
try:
# ask the {visitor} for a handler for my type
handler = visitor.file
# if it doesn't exist
except AttributeError:
# chain up
return super().identify(visitor=visitor, **kwds)
# if it does, invoke it
return handler(file=self, **kwds)
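# Hedged sketch (not part of merlin): identify() above performs a small double
# dispatch, looking for a `file` handler on the visitor and chaining up when the
# visitor does not provide one.  The class below only illustrates the shape of
# that contract; it is not a real merlin protocol implementation.
class _ExampleFileVisitor:
    """Minimal visitor with the attribute that File.identify looks for"""
    def file(self, file, **kwds):
        # invoked as handler(file=self, **kwds) from File.identify
        return f"file asset in category {file.category}"
# usage sketch: someFileAsset.identify(visitor=_ExampleFileVisitor())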
# end of file
| 24.304348
| 91
| 0.628801
|
b2fda40c8ae316678a22e21453ee263fac079bc9
| 2,237
|
py
|
Python
|
ressonantes/core/migrations/0007_auto_20200914_1736.py
|
ag-castro/brazil-ongs-mapping
|
80f7542d437913ad92cd74b6e456760f61be32ad
|
[
"Unlicense"
] | 1
|
2020-09-07T17:33:42.000Z
|
2020-09-07T17:33:42.000Z
|
ressonantes/core/migrations/0007_auto_20200914_1736.py
|
ag-castro/brazil-ongs-mapping
|
80f7542d437913ad92cd74b6e456760f61be32ad
|
[
"Unlicense"
] | null | null | null |
ressonantes/core/migrations/0007_auto_20200914_1736.py
|
ag-castro/brazil-ongs-mapping
|
80f7542d437913ad92cd74b6e456760f61be32ad
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 2.2.15 on 2020-09-14 17:36
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
dependencies = [
('core', '0006_auto_20200914_1631'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='slug',
field=models.SlugField(auto_created=True, default='zm3g25wvq7lh0latonrxukn2ve6p09u0ri9h05tuv5xyyfik6l2yre8n94v5', help_text='URL de exibição da ONG.', max_length=60, unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid'), django.core.validators.RegexValidator(re.compile('^[-\\w]+\\Z'), "Enter a valid 'slug' consisting of Unicode letters, numbers, underscores, or hyphens.", 'invalid')], verbose_name='Slug'),
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=255, verbose_name='Endereço')),
('number', models.IntegerField(blank=True, default=0, null=True, verbose_name='Número')),
('neighborhood', models.CharField(max_length=50, verbose_name='Bairro/Setor')),
('complement', models.CharField(max_length=150, verbose_name='Complemento')),
('cep', models.CharField(max_length=16, verbose_name='CEP')),
('city', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='city_address', to='core.City', verbose_name='Cidade')),
('state', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='state_address', to='core.Uf', verbose_name='Estado/UF')),
],
),
migrations.AddField(
model_name='organization',
name='address',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='core.Address', verbose_name='Endereço'),
),
]
| 55.925
| 555
| 0.660706
|
719527ee2b6485f2b7465d9a3c00df3bb393647f
| 15,046
|
py
|
Python
|
src/pipgrip/libs/mixology/incompatibility.py
|
pombredanne/pipgrip
|
a1e0165d9fc9085052c8a010bedf36daff4133be
|
[
"BSD-3-Clause"
] | 100
|
2020-01-07T13:32:02.000Z
|
2022-03-30T14:35:21.000Z
|
src/pipgrip/libs/mixology/incompatibility.py
|
pombredanne/pipgrip
|
a1e0165d9fc9085052c8a010bedf36daff4133be
|
[
"BSD-3-Clause"
] | 50
|
2020-01-07T13:17:31.000Z
|
2022-03-01T17:55:57.000Z
|
src/pipgrip/libs/mixology/incompatibility.py
|
pombredanne/pipgrip
|
a1e0165d9fc9085052c8a010bedf36daff4133be
|
[
"BSD-3-Clause"
] | 7
|
2020-01-09T23:06:10.000Z
|
2021-04-11T23:57:26.000Z
|
from typing import Dict, Generator, Hashable, List, Optional
from pipgrip.libs.mixology.incompatibility_cause import (
ConflictCause,
DependencyCause,
IncompatibilityCause,
NoVersionsCause,
PackageNotFoundCause,
RootCause,
)
from pipgrip.libs.mixology.package import Package
from pipgrip.libs.mixology.term import Term
class Incompatibility:
def __init__(
self, terms, cause
): # type: (List[Term], IncompatibilityCause) -> None
# Remove the root package from generated incompatibilities, since it will
# always be satisfied. This makes error reporting clearer, and may also
# make solving more efficient.
if (
len(terms) != 1
and isinstance(cause, ConflictCause)
and any(
term.is_positive() and term.package != Package.root() for term in terms
)
):
terms = [
term
for term in terms
if not term.is_positive() or term.package != Package.root()
]
if (
len(terms) == 1
# Short-circuit in the common case of a two-term incompatibility with
# two different packages (for example, a dependency).
or len(terms) == 2
and terms[0].package != terms[-1].package
):
pass
else:
# Coalesce multiple terms about the same package if possible.
by_name = {} # type: Dict[Hashable, Dict[Hashable, Term]]
for term in terms:
if term.package not in by_name:
by_name[term.package] = {}
by_ref = by_name[term.package]
ref = term.package
if ref in by_ref:
by_ref[ref] = by_ref[ref].intersect(term)
# If we have two terms that refer to the same package but have a null
# intersection, they're mutually exclusive, making this incompatibility
# irrelevant, since we already know that mutually exclusive version
# ranges are incompatible. We should never derive an irrelevant
# incompatibility.
assert by_ref[ref] is not None
else:
by_ref[ref] = term
new_terms = []
for by_ref in by_name.values():
positive_terms = [
term for term in by_ref.values() if term.is_positive()
]
if positive_terms:
new_terms += positive_terms
continue
new_terms += list(by_ref.values())
terms = new_terms
self._terms = terms
self._cause = cause
@property
def terms(self): # type: () -> List[Term]
return self._terms
@property
def cause(self): # type: () -> IncompatibilityCause
return self._cause
@property
def external_incompatibilities(self): # type: () -> Generator[Incompatibility]
"""
Returns all external incompatibilities in this incompatibility's
derivation graph.
"""
if isinstance(self._cause, ConflictCause):
cause = self._cause # type: ConflictCause
for incompatibility in cause.conflict.external_incompatibilities:
yield incompatibility
for incompatibility in cause.other.external_incompatibilities:
yield incompatibility
else:
yield self
def is_failure(self): # type: () -> bool
return len(self._terms) == 0 or (
len(self._terms) == 1
and self._terms[0].package == Package.root()
and self._terms[0].is_positive()
)
def handle_cause(self): # type: () -> Optional[str]
if isinstance(self._cause, DependencyCause):
assert len(self._terms) == 2
depender = self._terms[0]
dependee = self._terms[1]
assert depender.is_positive()
assert not dependee.is_positive()
return "{} depends on {}".format(
depender.to_string(allow_every=True), dependee.inverse
)
elif isinstance(self._cause, NoVersionsCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
return "no versions of {} match {}".format(
self._terms[0].package, self._terms[0].constraint.constraint
)
elif isinstance(self._cause, PackageNotFoundCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
return "{} doesn't exist".format(self._terms[0].package)
elif isinstance(self._cause, RootCause):
assert len(self._terms) == 1
assert not self._terms[0].is_positive()
assert self._terms[0].package == Package.root()
return "{} is {}".format(self._terms[0].package, self._terms[0].constraint)
def __str__(self):
cause_string = self.handle_cause()
if cause_string is not None:
return cause_string
elif self.is_failure():
return "version solving failed"
if len(self._terms) == 1:
            term = self._terms[0]
            return "{} is {}".format(
                term.package, "forbidden" if term.is_positive() else "required"
            )
if len(self._terms) == 2:
term1 = self._terms[0]
term2 = self._terms[1]
if term1.is_positive() == term2.is_positive():
if term1.is_positive():
package1 = (
term1.package
if term1.constraint.is_any()
else self._terse(term1)
)
package2 = (
term2.package
if term2.constraint.is_any()
else self._terse(term2)
)
return "{} is incompatible with {}".format(package1, package2)
else:
return "either {} or {}".format(
self._terse(term1), self._terse(term2)
)
positive = []
negative = []
for term in self._terms:
if term.is_positive():
positive.append(term.to_string(allow_every=True))
else:
negative.append(term.inverse.to_string())
if positive and negative:
if len(positive) == 1:
positive_term = [term for term in self._terms if term.is_positive()][0]
return "{} requires {}".format(
positive_term.to_string(allow_every=True), " or ".join(negative)
)
else:
return "if {} then {}".format(
" and ".join(positive), " or ".join(negative)
)
elif positive:
return "one of {} must be false".format(" or ".join(positive))
else:
return "one of {} must be true".format(" or ".join(negative))
def and_to_string(
self, other, details, this_line, other_line
): # type: (Incompatibility, dict, int, int) -> str
requires_both = self._try_requires_both(other, details, this_line, other_line)
if requires_both is not None:
return requires_both
requires_through = self._try_requires_through(
other, details, this_line, other_line
)
if requires_through is not None:
return requires_through
requires_forbidden = self._try_requires_forbidden(
other, details, this_line, other_line
)
if requires_forbidden is not None:
return requires_forbidden
buffer = [str(self)]
if this_line is not None:
buffer.append(" " + this_line)
buffer.append(" and {}".format(str(other)))
if other_line is not None:
buffer.append(" " + other_line)
return "\n".join(buffer)
def _try_requires_both(
self, other, details, this_line, other_line
): # type: (Incompatibility, dict, int, int) -> Optional[str]
if len(self._terms) == 1 or len(other.terms) == 1:
return
this_positive = self._single_term_where(lambda term: term.is_positive())
if this_positive is None:
return
other_positive = other._single_term_where(lambda term: term.is_positive())
if other_positive is None:
return
if this_positive.package != other_positive.package:
return
this_negatives = " or ".join(
[term.inverse.to_string() for term in self._terms if not term.is_positive()]
)
other_negatives = " or ".join(
[term.inverse.to_string() for term in other.terms if not term.is_positive()]
)
buffer = [self._terse(this_positive, allow_every=True) + " "]
is_dependency = isinstance(self.cause, DependencyCause) and isinstance(
other.cause, DependencyCause
)
if is_dependency:
buffer.append("depends on")
else:
buffer.append("requires")
buffer.append(" both {}".format(this_negatives))
if this_line is not None:
buffer.append(" ({})".format(this_line))
buffer.append(" and {}".format(other_negatives))
if other_line is not None:
buffer.append(" ({})".format(other_line))
return "".join(buffer)
def _try_requires_through(
self, other, details, this_line, other_line
): # type: (Incompatibility, dict, int, int) -> Optional[str]
if len(self._terms) == 1 or len(other.terms) == 1:
return
this_negative = self._single_term_where(lambda term: not term.is_positive())
other_negative = other._single_term_where(lambda term: not term.is_positive())
if this_negative is None and other_negative is None:
return
this_positive = self._single_term_where(lambda term: term.is_positive())
        other_positive = other._single_term_where(lambda term: term.is_positive())
if (
this_negative is not None
and other_positive is not None
and this_negative.package == other_positive.package
and this_negative.inverse.satisfies(other_positive)
):
prior = self
prior_negative = this_negative
prior_line = this_line
latter = other
latter_line = other_line
elif (
other_negative is not None
and this_positive is not None
and other_negative.package == this_positive.package
and other_negative.inverse.satisfies(this_positive)
):
prior = other
prior_negative = other_negative
prior_line = other_line
latter = self
latter_line = this_line
else:
return
prior_positives = [term for term in prior.terms if term.is_positive()]
buffer = []
if len(prior_positives) > 1:
prior_string = " or ".join([self._terse(term) for term in prior_positives])
buffer.append("if {} then ".format(prior_string))
else:
if isinstance(prior.cause, DependencyCause):
verb = "depends on"
else:
verb = "requires"
buffer.append(
"{} {} ".format(prior_positives[0].to_string(allow_every=True), verb)
)
buffer.append(prior_negative.inverse.to_string())
if prior_line is not None:
buffer.append(" ({})".format(prior_line))
buffer.append(" which ")
if isinstance(latter.cause, DependencyCause):
buffer.append("depends on ")
else:
buffer.append("requires ")
buffer.append(
" or ".join(
[
term.inverse.to_string()
for term in latter.terms
if not term.is_positive()
]
)
)
if latter_line is not None:
buffer.append(" ({})".format(latter_line))
return "".join(buffer)
def _try_requires_forbidden(
self, other, details, this_line, other_line
): # type: (Incompatibility, dict, int, int) -> Optional[str]
if len(self._terms) != 1 and len(other.terms) != 1:
return None
if len(self.terms) == 1:
prior = other
latter = self
prior_line = other_line
latter_line = this_line
else:
prior = self
latter = other
prior_line = this_line
latter_line = other_line
negative = prior._single_term_where(lambda term: not term.is_positive())
if negative is None:
return
if not negative.inverse.satisfies(latter.terms[0]):
return
positives = [t for t in prior.terms if t.is_positive()]
buffer = []
if len(positives) > 1:
prior_string = " or ".join([self._terse(term) for term in positives])
buffer.append("if {} then ".format(prior_string))
else:
buffer.append(self._terse(positives[0], allow_every=True))
if isinstance(prior.cause, DependencyCause):
buffer.append(" depends on ")
else:
buffer.append(" requires ")
buffer.append(latter.terms[0].to_string(allow_every=True) + " ")
if prior_line is not None:
buffer.append("({}) ".format(prior_line))
if isinstance(latter.cause, NoVersionsCause):
buffer.append("which doesn't match any versions")
elif isinstance(latter.cause, PackageNotFoundCause):
buffer.append("which doesn't exist")
else:
buffer.append("which is forbidden")
if latter_line is not None:
buffer.append(" ({})".format(latter_line))
return "".join(buffer)
def _terse(self, term, allow_every=False): # type: (Term, bool) -> str
return term.to_string(allow_every=allow_every)
def _single_term_where(self, callable_): # type: (callable) -> Optional[Term]
found = None
for term in self._terms:
if not callable_(term):
continue
if found is not None:
return
found = term
return found
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, str(self))
| 34.351598
| 91
| 0.550578
|
133005602ef2dff85f9a89d3cf0e55daf4a0aef4
| 13,944
|
py
|
Python
|
src/Products/ZCTextIndex/ZCTextIndex.py
|
zopefoundation/Products.ZCatalog
|
d4fd9fe28d27e8ff43e911025c1258f1e8d50ad5
|
[
"ZPL-2.1"
] | 4
|
2018-09-13T22:10:22.000Z
|
2019-06-15T08:26:52.000Z
|
src/Products/ZCTextIndex/ZCTextIndex.py
|
zopefoundation/Products.ZCatalog
|
d4fd9fe28d27e8ff43e911025c1258f1e8d50ad5
|
[
"ZPL-2.1"
] | 97
|
2015-02-05T11:58:41.000Z
|
2022-02-08T21:34:11.000Z
|
src/Products/ZCTextIndex/ZCTextIndex.py
|
zopefoundation/Products.ZCatalog
|
d4fd9fe28d27e8ff43e911025c1258f1e8d50ad5
|
[
"ZPL-2.1"
] | 12
|
2015-04-03T05:30:24.000Z
|
2019-08-12T21:50:21.000Z
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Plug in text index for ZCatalog with relevance ranking.
"""
try:
from html import escape
except ImportError:
from cgi import escape
from AccessControl.class_init import InitializeClass
from AccessControl.Permissions import manage_vocabulary
from AccessControl.Permissions import manage_zcatalog_indexes
from AccessControl.Permissions import query_vocabulary
from AccessControl.Permissions import search_zcatalog
from AccessControl.SecurityInfo import ClassSecurityInfo
from Acquisition import Implicit
from Acquisition import aq_base
from Acquisition import aq_inner
from Acquisition import aq_parent
from App.special_dtml import DTMLFile
from OFS.SimpleItem import SimpleItem
from Persistence import Persistent
from zope.interface import implementer
from Products.PluginIndexes.interfaces import IPluggableIndex
from Products.PluginIndexes.interfaces import IQueryIndex
from Products.PluginIndexes.util import safe_callable
from Products.ZCatalog.query import IndexQuery
from Products.ZCTextIndex.CosineIndex import CosineIndex
from Products.ZCTextIndex.interfaces import ILexicon
from Products.ZCTextIndex.interfaces import IZCLexicon
from Products.ZCTextIndex.interfaces import IZCTextIndex
from Products.ZCTextIndex.Lexicon import Lexicon
from Products.ZCTextIndex.NBest import NBest
from Products.ZCTextIndex.OkapiIndex import OkapiIndex
from Products.ZCTextIndex.PipelineFactory import element_factory
from Products.ZCTextIndex.QueryParser import QueryParser
index_types = {'Okapi BM25 Rank': OkapiIndex,
'Cosine Measure': CosineIndex}
try:
basestring
except NameError:
basestring = str
@implementer(IZCTextIndex, IQueryIndex, IPluggableIndex)
class ZCTextIndex(Persistent, Implicit, SimpleItem):
"""Persistent text index.
"""
meta_type = 'ZCTextIndex'
zmi_icon = 'fas fa-list'
operators = ('and', 'or')
useOperator = 'or'
query_options = ('query', )
manage_options = (
{'label': 'Overview', 'action': 'manage_main'},
)
security = ClassSecurityInfo()
security.declareObjectProtected(manage_zcatalog_indexes)
def __init__(self, id, extra=None, caller=None, index_factory=None,
field_name=None, lexicon_id=None):
self.id = id
# Arguments can be passed directly to the constructor or
# via the silly "extra" record.
self._fieldname = field_name or getattr(extra, 'doc_attr', '') or id
self._indexed_attrs = self._fieldname.split(',')
self._indexed_attrs = [attr.strip()
for attr in self._indexed_attrs if attr]
lexicon_id = lexicon_id or getattr(extra, 'lexicon_id', '')
lexicon = getattr(caller, lexicon_id, None)
if lexicon is None:
raise LookupError('Lexicon "%s" not found' % escape(lexicon_id))
if not ILexicon.providedBy(lexicon):
raise ValueError('Object "%s" does not implement '
'ZCTextIndex Lexicon interface'
% lexicon.getId())
self.lexicon_id = lexicon.getId()
self._v_lexicon = lexicon
if index_factory is None:
if extra.index_type not in index_types.keys():
raise ValueError('Invalid index type "%s"' % escape(
extra.index_type))
self._index_factory = index_types[extra.index_type]
self._index_type = extra.index_type
else:
self._index_factory = index_factory
self.index = self._index_factory(aq_base(self.getLexicon()))
@security.private
def getLexicon(self):
"""Get the lexicon for this index."""
if hasattr(aq_base(self), 'lexicon'):
# Fix up old ZCTextIndexes by removing direct lexicon ref
# and changing it to an ID
lexicon = getattr(aq_parent(aq_inner(self)), self.lexicon.getId())
self.lexicon_id = lexicon.getId()
del self.lexicon
if getattr(aq_base(self), 'lexicon_path', None):
# Fix up slightly less old ZCTextIndexes by removing
# the physical path and changing it to an ID.
# There's no need to use a physical path, which otherwise
# makes it difficult to move or rename ZCatalogs.
self.lexicon_id = self.lexicon_path[-1]
del self.lexicon_path
try:
return self._v_lexicon
except AttributeError:
lexicon = getattr(aq_parent(aq_inner(self)), self.lexicon_id)
if not ILexicon.providedBy(lexicon):
raise TypeError('Object "%s" is not a ZCTextIndex Lexicon'
% repr(lexicon))
self._v_lexicon = lexicon
return lexicon
# External methods not in the Pluggable Index API
@security.protected(search_zcatalog)
def query(self, query, nbest=10):
"""Return pair (mapping from docids to scores, num results).
The num results is the total number of results before trimming
to the nbest results.
"""
tree = QueryParser(self.getLexicon()).parseQuery(query)
results = tree.executeQuery(self.index)
if results is None:
return [], 0
chooser = NBest(nbest)
chooser.addmany(results.items())
return chooser.getbest(), len(results)
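# Illustrative (hypothetical) usage, assuming `index` is a wired-up ZCTextIndex:
#   results, total = index.query('foo AND bar', nbest=5)
#   for docid, score in results:
#       ...
# `results` holds at most `nbest` (docid, score) pairs; `total` is the number
# of matches before trimming.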
# Pluggable Index APIs
def index_object(self, documentId, obj, threshold=None):
"""Wrapper for index_doc() handling indexing of multiple attributes.
Enter the document with the specified documentId in the index
under the terms extracted from the indexed text attributes,
each of which should yield either a string or a list of
strings (Unicode or otherwise) to be passed to index_doc().
"""
# TODO we currently ignore subtransaction threshold
# needed for backward compatibility
fields = getattr(self, '_indexed_attrs', [self._fieldname])
all_texts = []
for attr in fields:
text = getattr(obj, attr, None)
if text is None:
continue
if safe_callable(text):
text = text()
if text is not None:
if isinstance(text, (list, tuple, set)):
all_texts.extend(text)
else:
all_texts.append(text)
# Check that we're sending only strings
all_texts = [t for t in all_texts if isinstance(t, basestring)]
if all_texts:
return self.index.index_doc(documentId, all_texts)
return 0
def unindex_object(self, docid):
if self.index.has_doc(docid):
self.index.unindex_doc(docid)
def _apply_index(self, request):
record = IndexQuery(request, self.id, self.query_options)
if record.keys is None:
return None
return (self.query_index(record), (self.id, ))
def query_index(self, record, resultset=None):
query_str = ' '.join(record.keys)
if not query_str:
return None
tree = QueryParser(self.getLexicon()).parseQuery(query_str)
results = tree.executeQuery(self.index)
return results
def getEntryForObject(self, documentId, default=None):
"""Return the list of words indexed for documentId"""
try:
word_ids = self.index.get_words(documentId)
except KeyError:
return default
get_word = self.getLexicon().get_word
return [get_word(wid) for wid in word_ids]
def uniqueValues(self, name=None, withLengths=0):
raise NotImplementedError
# The ZCatalog Index management screen uses these methods
def numObjects(self):
"""Return number of unique words in the index"""
return self.index.length()
def indexSize(self):
"""Return the number of indexes objects """
return self.index.document_count()
def clear(self):
"""reinitialize the index (but not the lexicon)"""
try:
# Remove the cached reference to the lexicon
# So that it is refreshed
del self._v_lexicon
except (AttributeError, KeyError):
pass
self.index = self._index_factory(aq_base(self.getLexicon()))
# User Interface Methods
manage_main = DTMLFile('dtml/manageZCTextIndex', globals())
def getIndexSourceNames(self):
"""Return sequence of names of indexed attributes"""
try:
return self._indexed_attrs
except Exception:
return [self._fieldname]
def getIndexQueryNames(self):
"""Indicate that this index applies to queries for the index's name."""
return (self.id,)
def getIndexType(self):
"""Return index type string"""
return getattr(self, '_index_type', self._index_factory.__name__)
def getLexiconURL(self):
"""Return the url of the lexicon used by the index"""
try:
lex = self.getLexicon()
except (KeyError, AttributeError):
return None
else:
return lex.absolute_url()
InitializeClass(ZCTextIndex)
def manage_addZCTextIndex(self, id, extra=None, REQUEST=None,
RESPONSE=None):
"""Add a text index"""
if REQUEST is None:
URL3 = None
else:
URL3 = REQUEST.URL3
return self.manage_addIndex(id, 'ZCTextIndex', extra,
REQUEST, RESPONSE, URL3)
manage_addZCTextIndexForm = DTMLFile('dtml/addZCTextIndex', globals())
manage_addLexiconForm = DTMLFile('dtml/addLexicon', globals())
def manage_addLexicon(self, id, title='', elements=[], REQUEST=None):
"""Add ZCTextIndex Lexicon"""
pipeline = []
for el_record in elements:
if not hasattr(el_record, 'name'):
continue # Skip over records that only specify element group
element = element_factory.instantiate(el_record.group, el_record.name)
if element is not None:
if el_record.group == 'Word Splitter':
# I don't like hardcoding this, but it's a simple solution
# to get the splitter element first in the pipeline
pipeline.insert(0, element)
else:
pipeline.append(element)
lexicon = PLexicon(id, title, *pipeline)
self._setObject(id, lexicon)
if REQUEST is not None:
return self.manage_main(self, REQUEST, update_menu=1)
# I am borrowing the existing vocabulary permissions for now to avoid
# adding new permissions. This may change when old style Vocabs go away
LexiconQueryPerm = query_vocabulary
LexiconMgmtPerm = manage_vocabulary
@implementer(IZCLexicon)
class PLexicon(Lexicon, Implicit, SimpleItem):
"""Lexicon for ZCTextIndex.
"""
meta_type = 'ZCTextIndex Lexicon'
zmi_icon = 'fas fa-book'
manage_options = ({'label': 'Overview', 'action': 'manage_main'},
{'label': 'Query', 'action': 'queryLexicon'},
) + SimpleItem.manage_options
security = ClassSecurityInfo()
security.declareObjectProtected(LexiconQueryPerm)
def __init__(self, id, title='', *pipeline):
self.id = str(id)
self.title = str(title)
PLexicon.inheritedAttribute('__init__')(self, *pipeline)
# User Interface Methods
def getPipelineNames(self):
"""Return list of names of pipeline element classes"""
return [element.__class__.__name__ for element in self._pipeline]
_queryLexicon = DTMLFile('dtml/queryLexicon', globals())
@security.protected(LexiconQueryPerm)
def queryLexicon(self, REQUEST, words=None, page=0, rows=20, cols=4):
"""Lexicon browser/query user interface
"""
if words:
wids = []
for word in self.parseTerms(words):
wids.extend(self.globToWordIds(word))
words = [self.get_word(wid) for wid in wids]
else:
words = self.words()
word_count = len(words)
rows = max(min(rows, 500), 1)
cols = max(min(cols, 12), 1)
page_count = (word_count
/ (rows * cols)
+ (word_count % (rows * cols) > 0))
page = max(min(page, page_count - 1), 0)
start = rows * cols * page
end = min(rows * cols * (page + 1), word_count)
if word_count:
words = list(words[start:end])
else:
words = []
columns = []
i = 0
while i < len(words):
columns.append(words[i:i + rows])
i += rows
info = dict(page=page,
rows=rows,
cols=cols,
start_word=start + 1,
end_word=end,
word_count=word_count,
page_count=page_count,
page_range=range(int(page_count)),
page_columns=columns)
if REQUEST is not None:
return self._queryLexicon(self, REQUEST, **info)
return info
security.declareProtected(LexiconMgmtPerm, 'manage_main')
manage_main = DTMLFile('dtml/manageLexicon', globals())
InitializeClass(PLexicon)
| 34.600496
| 79
| 0.629733
|
b513703ce8b9cb120229facbdd751c47eb916636
| 4,245
|
py
|
Python
|
examples/python/reconstruction_system/integrate_scene.py
|
amoran-symbio/Open3D
|
ae7e44e0dcef11a5df763819d47dec8c5bd5294b
|
[
"MIT"
] | 1,455
|
2021-07-27T19:44:50.000Z
|
2022-03-31T19:39:21.000Z
|
examples/python/reconstruction_system/integrate_scene.py
|
amoran-symbio/Open3D
|
ae7e44e0dcef11a5df763819d47dec8c5bd5294b
|
[
"MIT"
] | 1,439
|
2021-07-27T16:02:52.000Z
|
2022-03-31T22:29:05.000Z
|
examples/python/reconstruction_system/integrate_scene.py
|
amoran-symbio/Open3D
|
ae7e44e0dcef11a5df763819d47dec8c5bd5294b
|
[
"MIT"
] | 339
|
2021-07-28T03:07:28.000Z
|
2022-03-31T13:38:00.000Z
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
# examples/python/reconstruction_system/integrate_scene.py
import numpy as np
import math
import os, sys
import open3d as o3d
pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(pyexample_path)
from utility.file import *
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from make_fragments import read_rgbd_image
def scalable_integrate_rgb_frames(path_dataset, intrinsic, config):
poses = []
[color_files, depth_files] = get_rgbd_file_lists(path_dataset)
n_files = len(color_files)
n_fragments = int(math.ceil(float(n_files) / \
config['n_frames_per_fragment']))
volume = o3d.pipelines.integration.ScalableTSDFVolume(
voxel_length=config["tsdf_cubic_size"] / 512.0,
sdf_trunc=0.04,
color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)
pose_graph_fragment = o3d.io.read_pose_graph(
join(path_dataset, config["template_refined_posegraph_optimized"]))
for fragment_id in range(len(pose_graph_fragment.nodes)):
pose_graph_rgbd = o3d.io.read_pose_graph(
join(path_dataset,
config["template_fragment_posegraph_optimized"] % fragment_id))
for frame_id in range(len(pose_graph_rgbd.nodes)):
frame_id_abs = fragment_id * \
config['n_frames_per_fragment'] + frame_id
print(
"Fragment %03d / %03d :: integrate rgbd frame %d (%d of %d)." %
(fragment_id, n_fragments - 1, frame_id_abs, frame_id + 1,
len(pose_graph_rgbd.nodes)))
rgbd = read_rgbd_image(color_files[frame_id_abs],
depth_files[frame_id_abs], False, config)
pose = np.dot(pose_graph_fragment.nodes[fragment_id].pose,
pose_graph_rgbd.nodes[frame_id].pose)
volume.integrate(rgbd, intrinsic, np.linalg.inv(pose))
poses.append(pose)
mesh = volume.extract_triangle_mesh()
mesh.compute_vertex_normals()
if config["debug_mode"]:
o3d.visualization.draw_geometries([mesh])
mesh_name = join(path_dataset, config["template_global_mesh"])
o3d.io.write_triangle_mesh(mesh_name, mesh, False, True)
traj_name = join(path_dataset, config["template_global_traj"])
write_poses_to_log(traj_name, poses)
def run(config):
print("integrate the whole RGBD sequence using estimated camera pose.")
if config["path_intrinsic"]:
intrinsic = o3d.io.read_pinhole_camera_intrinsic(
config["path_intrinsic"])
else:
intrinsic = o3d.camera.PinholeCameraIntrinsic(
o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
scalable_integrate_rgb_frames(config["path_dataset"], intrinsic, config)
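# Note: the `config` dict consumed above comes from the reconstruction
# system's JSON configuration. The keys read in this module are
# "path_dataset", "path_intrinsic", "n_frames_per_fragment",
# "tsdf_cubic_size", "debug_mode",
# "template_refined_posegraph_optimized",
# "template_fragment_posegraph_optimized",
# "template_global_mesh" and "template_global_traj"; suitable values are
# project-specific and not shown here.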
| 43.762887
| 80
| 0.664782
|
7cc3ad5db5f22b4851e53eee76115c2d63960547
| 3,058
|
py
|
Python
|
models/bilibili.py
|
shunkoucho/bilibili-downloader
|
a82fe81d42f49b658163083e3dd837b7ca949bf6
|
[
"MIT"
] | 8
|
2021-03-15T01:14:36.000Z
|
2022-02-10T08:54:36.000Z
|
models/bilibili.py
|
shunkoucho/bilibili-downloader
|
a82fe81d42f49b658163083e3dd837b7ca949bf6
|
[
"MIT"
] | 1
|
2021-07-03T01:42:36.000Z
|
2021-07-05T05:49:02.000Z
|
models/bilibili.py
|
shunkoucho/bilibili-downloader
|
a82fe81d42f49b658163083e3dd837b7ca949bf6
|
[
"MIT"
] | 2
|
2021-02-21T02:56:17.000Z
|
2022-02-10T08:54:46.000Z
|
from bs4 import BeautifulSoup
import config
import requests
import time
import json
import re
class BilibiliInfo:
'''Class that fetches video and audio related information.'''
def __init__(self, url):
# URL of the video page
self.url = url
def get_video_page(self):
response = requests.get(
url=self.url,
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36 Edg/88.0.705.68',
'cookie': config.COOKIE
},
)
if response.status_code == 200:
return response.text
raise Exception('failed')
def get_video_quality(self, id):
'''
The id parameter selects the video quality. Non-VIP users can only download up to HD 1080P; by default the highest downloadable quality is used.
- 120: Ultra HD 4K (requires the SESSDATA value from a VIP member's cookie)
- 116: HD 1080P60 (VIP membership required)
- 112: HD 1080P+ (VIP membership required)
- 80: HD 1080P
- 74: HD 720P60 (VIP membership required)
- 64: HD 720P
- 32: SD 480P
- 16: LD 360P
'''
quality = {
120: 'Ultra HD 4K',
116: 'HD 1080P60',
112: 'HD 1080P+',
80: 'HD 1080P',
74: 'HD 720P60',
64: 'HD 720P',
32: 'SD 480P',
16: 'LD 360P',
}
print(f'Video quality to be downloaded: {quality[id]}')
def get_video_info(self, page):
try:
bs = BeautifulSoup(page, 'html.parser')
# Get the video title
video_title = bs.find('span', 'tit').get_text()
# Get the video URL
pattern = re.compile(
r"window\.__playinfo__=(.*?)$", re.MULTILINE | re.DOTALL)
script = bs.find("script", text=pattern)
result = pattern.search(script.next).group(1)
temp = json.loads(result)
# Take the first video URL
for item in temp['data']['dash']['video']:
if 'baseUrl' in item.keys():
video_url = item['baseUrl']
self.get_video_quality(item['id'])
break
return {
'title': video_title,
'video_url': video_url
}
except requests.RequestException:
print('Invalid video link, please try a different one')
exit(1)
def get_audio_info(self, page):
try:
bs = BeautifulSoup(page, "html.parser")
# Get the audio title
audio_title = bs.find('span', 'tit').get_text()
# Get the audio URL
pattern = re.compile(r'window\.__playinfo__=(.*?)$')
script = bs.find('script', text=pattern)
result = pattern.search(script.next).group(1)
temp = json.loads(result)
# Take the first audio URL
for item in temp['data']['dash']['audio']:
if 'baseUrl' in item.keys():
audio_url = item['baseUrl']
break
return {
'title': audio_title,
'audio_url': audio_url
}
except requests.RequestException:
print('Invalid audio link, please try a different one')
exit(1)
| 27.303571
| 164
| 0.485939
|
da287bafc7da5b30f5105df70480aaf25ae82309
| 661
|
py
|
Python
|
odoo-14.0/addons/sale_coupon/__manifest__.py
|
Yomy1996/P1
|
59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29
|
[
"CC-BY-3.0"
] | null | null | null |
odoo-14.0/addons/sale_coupon/__manifest__.py
|
Yomy1996/P1
|
59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29
|
[
"CC-BY-3.0"
] | null | null | null |
odoo-14.0/addons/sale_coupon/__manifest__.py
|
Yomy1996/P1
|
59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': "Sale Coupon",
'summary': "Use discount coupons in sales orders",
'description': """Integrate coupon mechanism in sales orders.""",
'category': 'Sales/Sales',
'version': '1.0',
'depends': ['coupon', 'sale'],
'data': [
'security/sale_coupon_security.xml',
'security/ir.model.access.csv',
'wizard/sale_coupon_apply_code_views.xml',
'views/sale_order_views.xml',
'views/coupon_views.xml',
'views/coupon_program_views.xml',
'views/res_config_settings_views.xml',
],
}
| 33.05
| 74
| 0.626324
|
b4ef3d9b554a33c43718d29b20020cecdf23b951
| 1,176
|
py
|
Python
|
main.py
|
MrWormsy/polychess
|
534b0120686c2dc8d0b8c5ef9fc1fe14e607261e
|
[
"MIT"
] | null | null | null |
main.py
|
MrWormsy/polychess
|
534b0120686c2dc8d0b8c5ef9fc1fe14e607261e
|
[
"MIT"
] | null | null | null |
main.py
|
MrWormsy/polychess
|
534b0120686c2dc8d0b8c5ef9fc1fe14e607261e
|
[
"MIT"
] | 3
|
2018-12-10T07:20:01.000Z
|
2018-12-19T08:41:12.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 09:35:35 2018
@authors: MrWormsy (AKA Antonin ROSA-MARTIN), Loick Combrie, Lucile Delage and David Petit
"""
from ModeJoueurContreJoueur import ModeJoueurContreJoueur
from ModeJoueurContreOrdinateur import ModeJoueurContreOrdinateur
from ModeOrdinateurContreOrdinateur import ModeOrdinateurContreOrdinateur
def main():
choixModeDeJeu()
def choixModeDeJeu():
inputStr = input("Mode joueur vs joueur (1) ou Mode joueur vs AI (2) ou Mode AI vs AI (3) : ")
flag = False
while(not flag):
flag = True
if(inputStr == "1"):
modeJcJ = ModeJoueurContreJoueur()
modeJcJ.commencerPartie()
elif(inputStr == "2"):
modeJcO = ModeJoueurContreOrdinateur()
modeJcO.commencerPartie()
elif(inputStr == "3"):
modeOcO = ModeOrdinateurContreOrdinateur()
modeOcO.commencerPartie()
else:
flag = False
inputStr = input("Mode joueur vs joueur (1) ou Mode joueur vs AI (2) ou Mode AI vs AI (3) ou Mode Debug (4) : ")
if __name__ == "__main__":
main()
| 30.153846
| 124
| 0.628401
|
494bb8998b606856b111b2eb8cd7487fd2ad9a73
| 417
|
py
|
Python
|
exercicios/ex029.py
|
luccasocastro/Curso-Python
|
7ad2b980bb2f95f833811291273d6ca1beb0fe77
|
[
"MIT"
] | null | null | null |
exercicios/ex029.py
|
luccasocastro/Curso-Python
|
7ad2b980bb2f95f833811291273d6ca1beb0fe77
|
[
"MIT"
] | null | null | null |
exercicios/ex029.py
|
luccasocastro/Curso-Python
|
7ad2b980bb2f95f833811291273d6ca1beb0fe77
|
[
"MIT"
] | null | null | null |
valor = float(input('Enter the price of the house: '))
sal = float(input('Enter your salary: '))
anos = int(input('Enter over how many years you want to pay: '))
prestação = valor/(anos*12)
valMax = sal * 30 / 100
print('To pay for a R${:.2f} house over {} years the monthly installment will be R${:.2f}'.format(valor, anos, prestação))
if prestação <= valMax:
print('Loan APPROVED!')
else:
print('Loan DENIED!')
| 37.909091
| 111
| 0.678657
|
ef5f9a6b3240946fd4439d2f1a3f4e92193659d5
| 1,962
|
py
|
Python
|
membership/urls.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 13
|
2015-11-29T12:19:12.000Z
|
2021-02-21T15:42:11.000Z
|
membership/urls.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 23
|
2015-04-29T19:43:34.000Z
|
2021-02-10T05:50:17.000Z
|
membership/urls.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 11
|
2015-09-20T18:59:00.000Z
|
2020-02-07T08:47:34.000Z
|
from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from .views import (
membership_admin_emails_api,
membership_admin_member_view,
membership_admin_members_view,
membership_admin_term_view,
membership_apply_view,
membership_profile_view,
membership_pay_fee_view,
)
urlpatterns = [
url(
r'^organizations/(?P<organization_slug>[a-z0-9-]+)/apply/?$',
membership_apply_view,
name='membership_apply_view'
),
url(
r'^organizations/(?P<organization_slug>[a-z0-9-]+)/admin/members/?$',
membership_admin_members_view,
kwargs=dict(format='screen'),
name='membership_admin_members_view'
),
url(
r'^organizations/(?P<organization_slug>[a-z0-9-]+)/admin/members.(?P<format>html|xlsx|csv)/?$',
membership_admin_members_view,
name='membership_admin_export_view'
),
url(
r'^organizations/(?P<organization_slug>[a-z0-9-]+)/admin/members/(?P<person_id>[0-9]+)/?$',
membership_admin_member_view,
name='membership_admin_member_view'
),
url(
r'^organizations/(?P<organization_slug>[a-z0-9-]+)/admin/term/(?P<term_id>\d+)/?$',
membership_admin_term_view,
name='membership_admin_term_view',
),
url(
r'^organizations/(?P<organization_slug>[a-z0-9-]+)/admin/term/?$',
membership_admin_term_view,
name='membership_admin_new_term_view',
),
url(
r"^organizations/(?P<organization_slug>[a-z0-9-]+)/fee/?$",
membership_pay_fee_view,
name="membership_pay_fee_view",
),
url(
r'^profile/organizations/?$',
membership_profile_view,
name='membership_profile_view'
),
url(
r'^api/v1/organizations/(?P<organization_slug>[a-z0-9-]+)/members/emails.txt$',
membership_admin_emails_api,
name='membership_admin_emails_api'
),
]
| 28.028571
| 103
| 0.644241
|
76127837868abf5fffdb9d84808bf2ac6f8f1be7
| 2,719
|
py
|
Python
|
deeptrack/test/test_elementwise.py
|
HarshithBachimanchi/DeepTrack-2.0
|
5983f5224b75aef4ce3932662bd15723f13841a0
|
[
"MIT"
] | 1
|
2022-03-18T17:25:18.000Z
|
2022-03-18T17:25:18.000Z
|
deeptrack/test/test_elementwise.py
|
HarshithBachimanchi/DeepTrack-2.0
|
5983f5224b75aef4ce3932662bd15723f13841a0
|
[
"MIT"
] | null | null | null |
deeptrack/test/test_elementwise.py
|
HarshithBachimanchi/DeepTrack-2.0
|
5983f5224b75aef4ce3932662bd15723f13841a0
|
[
"MIT"
] | null | null | null |
import sys
# sys.path.append(".") # Adds the module to path
import unittest
import operator
import itertools
from numpy.core.numeric import array_equal
from numpy.testing._private.utils import assert_almost_equal
from .. import elementwise, features, Image
import numpy as np
from deeptrack.backend._config import cupy as cp
import numpy.testing
import inspect
def grid_test_features(
tester,
feature,
feature_inputs,
expected_result_function,
):
for f_a_input in feature_inputs:
inp = features.Value(f_a_input)
f_a = feature(inp)
f_b = inp >> feature()
for f in [f_a, f_b]:
try:
output = f()
except Exception as e:
tester.assertRaises(
type(e),
lambda: expected_result_function(f_a_input),
)
continue
expected_result = expected_result_function(f_a_input)
output = np.array(output)
try:
expected_result = np.array(expected_result)
except TypeError:
expected_result = expected_result.get()
if isinstance(output, list) and isinstance(expected_result, list):
[
np.testing.assert_almost_equal(np.array(a), np.array(b))
for a, b in zip(output, expected_result)
]
else:
is_equal = np.allclose(
np.array(output), np.array(expected_result), equal_nan=True
)
tester.failIf(
not is_equal,
"Feature output {} is not equal to expect result {}.\n Using arguments {}".format(
output, expected_result, f_a_input
),
)
def create_test(cl):
testname = "test_{}".format(cl.__name__)
def test(self):
grid_test_features(
self,
cl,
[
-1,
0,
1,
(np.random.rand(50, 500) - 0.5) * 100,
(cp.random.rand(50, 500) - 0.5) * 100,
],
np.__dict__[cl.__name__.lower()],
)
test.__name__ = testname
return testname, test
class TestFeatures(unittest.TestCase):
pass
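# The loop below auto-generates one test_<ClassName> method per concrete
# ElementwiseFeature subclass found in `elementwise` and attaches it to
# TestFeatures, so every wrapper is checked against the NumPy function of the
# same (lower-cased) name without writing each test by hand.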
classes = inspect.getmembers(elementwise, inspect.isclass)
for clname, cl in classes:
if not issubclass(cl, elementwise.ElementwiseFeature) or (
cl is elementwise.ElementwiseFeature
):
continue
testname, test_method = create_test(cl)
setattr(TestFeatures, testname, test_method)
if __name__ == "__main__":
unittest.main()
| 24.276786
| 102
| 0.554248
|
c6706de006d02a680aa2c81bf235abfa7f5d3cdf
| 1,046
|
py
|
Python
|
coroutines/copipe.py
|
alick97/python_train
|
e84fac3e2a1355336c6d74fa5c103abebf701a7e
|
[
"MIT"
] | null | null | null |
coroutines/copipe.py
|
alick97/python_train
|
e84fac3e2a1355336c6d74fa5c103abebf701a7e
|
[
"MIT"
] | null | null | null |
coroutines/copipe.py
|
alick97/python_train
|
e84fac3e2a1355336c6d74fa5c103abebf701a7e
|
[
"MIT"
] | null | null | null |
# copipe.py
#
# A simple example showing how to hook up a pipeline with
# coroutines. To run this, you will need a log file.
# Run the program logsim.py in the background to get a data
# source.
from coroutine import coroutine
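# The `coroutine` decorator is defined in coroutine.py (not shown here); in
# this pipeline style it is conventionally a small priming wrapper, roughly:
#
#     def coroutine(func):
#         def start(*args, **kwargs):
#             cr = func(*args, **kwargs)
#             next(cr)  # advance to the first yield so .send() can be used
#             return cr
#         return start
#
# (a sketch of the usual pattern, not necessarily the exact local definition)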
# A data source. This is not a coroutine, but it sends
# data into one (target)
import time
def follow(thefile, target):
thefile.seek(0,2) # Go to the end of the file
while True:
line = thefile.readline()
if not line:
time.sleep(0.1) # Sleep briefly
continue
target.send(line)
# A filter.
@coroutine
def grep(pattern, target):
while True:
line = (yield) # Receive a line
if pattern in line:
target.send(line) # Send to next stage
# A sink. A coroutine that receives data
@coroutine
def printer():
while True:
line = (yield)
print(line)
# Example use
if __name__ == '__main__':
f = open("access-log.txt")
follow(f,
grep('python',
printer()))
| 22.73913
| 59
| 0.602294
|
f143b9c85bed4958835512fe996866c23df3f920
| 6,164
|
py
|
Python
|
ding/data/buffer/middleware/priority.py
|
davide97l/DI-engine
|
d48c93bcd5c07c29f2ce4ac1b7756b8bc255c423
|
[
"Apache-2.0"
] | 1
|
2022-03-21T16:15:39.000Z
|
2022-03-21T16:15:39.000Z
|
ding/data/buffer/middleware/priority.py
|
jiaruonan/DI-engine
|
268d77db3cb54401b2cfc83e2bc3ec87c31e7b83
|
[
"Apache-2.0"
] | null | null | null |
ding/data/buffer/middleware/priority.py
|
jiaruonan/DI-engine
|
268d77db3cb54401b2cfc83e2bc3ec87c31e7b83
|
[
"Apache-2.0"
] | null | null | null |
from typing import Callable, Any, List, Dict, Optional, Union, TYPE_CHECKING
import copy
import numpy as np
import torch
from ding.utils import SumSegmentTree, MinSegmentTree
from ding.data.buffer.buffer import BufferedData
if TYPE_CHECKING:
from ding.data.buffer.buffer import Buffer
class PriorityExperienceReplay:
def __init__(
self,
buffer: 'Buffer',
IS_weight: bool = True,
priority_power_factor: float = 0.6,
IS_weight_power_factor: float = 0.4,
IS_weight_anneal_train_iter: int = int(1e5),
) -> None:
self.buffer = buffer
self.buffer_idx = {}
self.buffer_size = buffer.size
self.IS_weight = IS_weight
self.priority_power_factor = priority_power_factor
self.IS_weight_power_factor = IS_weight_power_factor
self.IS_weight_anneal_train_iter = IS_weight_anneal_train_iter
# Max priority so far; used to initialize a datum's priority when "priority" is not passed in with the data.
self.max_priority = 1.0
# Capacity needs to be the power of 2.
capacity = int(np.power(2, np.ceil(np.log2(self.buffer_size))))
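# e.g. buffer_size=1000 -> capacity=1024, since 2 ** ceil(log2(1000)) = 2 ** 10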
self.sum_tree = SumSegmentTree(capacity)
if self.IS_weight:
self.min_tree = MinSegmentTree(capacity)
self.delta_anneal = (1 - self.IS_weight_power_factor) / self.IS_weight_anneal_train_iter
self.pivot = 0
def push(self, chain: Callable, data: Any, meta: Optional[dict] = None, *args, **kwargs) -> BufferedData:
if meta is None:
meta = {'priority': self.max_priority}
else:
if 'priority' not in meta:
meta['priority'] = self.max_priority
meta['priority_idx'] = self.pivot
self._update_tree(meta['priority'], self.pivot)
buffered = chain(data, meta=meta, *args, **kwargs)
index = buffered.index
self.buffer_idx[self.pivot] = index
self.pivot = (self.pivot + 1) % self.buffer_size
return buffered
def sample(self, chain: Callable, size: int, *args,
**kwargs) -> Union[List[BufferedData], List[List[BufferedData]]]:
# Evenly divide [0, 1) into `size` intervals
intervals = np.array([i * 1.0 / size for i in range(size)])
# Uniformly sample within each interval
mass = intervals + np.random.uniform(size=(size, )) * 1. / size
# Rescale to [0, S), where S is the sum of all datas' priority (root value of sum tree)
mass *= self.sum_tree.reduce()
indices = [self.sum_tree.find_prefixsum_idx(m) for m in mass]
indices = [self.buffer_idx[i] for i in indices]
# Sample with indices
data = chain(indices=indices, *args, **kwargs)
if self.IS_weight:
# Calculate max weight for normalizing IS
sum_tree_root = self.sum_tree.reduce()
p_min = self.min_tree.reduce() / sum_tree_root
buffer_count = self.buffer.count()
max_weight = (buffer_count * p_min) ** (-self.IS_weight_power_factor)
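# Per-sample importance-sampling weight: w_i = (N * P(i)) ** (-beta); dividing
# by max_weight (the weight of the lowest-priority sample) keeps priority_IS
# within (0, 1].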
for i in range(len(data)):
meta = data[i].meta
priority_idx = meta['priority_idx']
p_sample = self.sum_tree[priority_idx] / sum_tree_root
weight = (buffer_count * p_sample) ** (-self.IS_weight_power_factor)
meta['priority_IS'] = weight / max_weight
data[i].data['priority_IS'] = torch.as_tensor([meta['priority_IS']]).float()  # for compatibility
self.IS_weight_power_factor = min(1.0, self.IS_weight_power_factor + self.delta_anneal)
return data
def update(self, chain: Callable, index: str, data: Any, meta: Any, *args, **kwargs) -> None:
update_flag = chain(index, data, meta, *args, **kwargs)
if update_flag: # when update succeed
assert meta is not None, "Please indicate dict-type meta in priority update"
new_priority, idx = meta['priority'], meta['priority_idx']
assert new_priority >= 0, "new_priority should greater than 0, but found {}".format(new_priority)
new_priority += 1e-5 # Add epsilon to avoid priority == 0
self._update_tree(new_priority, idx)
self.max_priority = max(self.max_priority, new_priority)
def delete(self, chain: Callable, index: str, *args, **kwargs) -> None:
for item in self.buffer.storage:
meta = item.meta
priority_idx = meta['priority_idx']
self.sum_tree[priority_idx] = self.sum_tree.neutral_element
self.min_tree[priority_idx] = self.min_tree.neutral_element
self.buffer_idx.pop(priority_idx)
return chain(index, *args, **kwargs)
def clear(self, chain: Callable) -> None:
self.max_priority = 1.0
capacity = int(np.power(2, np.ceil(np.log2(self.buffer_size))))
self.sum_tree = SumSegmentTree(capacity)
if self.IS_weight:
self.min_tree = MinSegmentTree(capacity)
self.buffer_idx = {}
self.pivot = 0
chain()
def _update_tree(self, priority: float, idx: int) -> None:
weight = priority ** self.priority_power_factor
self.sum_tree[idx] = weight
if self.IS_weight:
self.min_tree[idx] = weight
def state_dict(self) -> Dict:
return {
'max_priority': self.max_priority,
'IS_weight_power_factor': self.IS_weight_power_factor,
'sum_tree': self.sum_tree,
'min_tree': self.min_tree,
'buffer_idx': self.buffer_idx,
}
def load_state_dict(self, _state_dict: Dict, deepcopy: bool = False) -> None:
for k, v in _state_dict.items():
if deepcopy:
setattr(self, '{}'.format(k), copy.deepcopy(v))
else:
setattr(self, '{}'.format(k), v)
def __call__(self, action: str, chain: Callable, *args, **kwargs) -> Any:
if action in ["push", "sample", "update", "delete", "clear"]:
return getattr(self, action)(chain, *args, **kwargs)
return chain(*args, **kwargs)
| 45.323529
| 119
| 0.61843
|
fe1290af386216116f7b306830a6a5fb263e6b6f
| 12,607
|
py
|
Python
|
spectrochempy/core/analysis/peakfinding.py
|
Mailaender/spectrochempy
|
d58221afeb9f78e2e3e0079b3fd6c0162a902c04
|
[
"CECILL-B"
] | null | null | null |
spectrochempy/core/analysis/peakfinding.py
|
Mailaender/spectrochempy
|
d58221afeb9f78e2e3e0079b3fd6c0162a902c04
|
[
"CECILL-B"
] | null | null | null |
spectrochempy/core/analysis/peakfinding.py
|
Mailaender/spectrochempy
|
d58221afeb9f78e2e3e0079b3fd6c0162a902c04
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
# ======================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# ======================================================================================================================
__all__ = ["find_peaks"]
__dataset_methods__ = ["find_peaks"]
import scipy.signal
import numpy as np
from datetime import datetime, timezone
"""wrappers of scipy.signal peak finding functions"""
# Todo:
# find_peaks_cwt(vector, widths[, wavelet, ...]) Attempt to find the peaks in a 1-D array.
# argrelmin(data[, axis, order, mode]) Calculate the relative minima of data.
# argrelmax(data[, axis, order, mode]) Calculate the relative maxima of data.
# argrelextrema(data, comparator[, axis, ...]) Calculate the relative extrema of data.
def find_peaks(
dataset,
height=None,
window_length=3,
threshold=None,
distance=None,
prominence=None,
width=None,
wlen=None,
rel_height=0.5,
plateau_size=None,
use_coord=True,
):
"""
Wrapper and extension of scipy.signal.find_peaks(). Find peaks inside a 1D NDDataset based on peak properties.
This function finds all local maxima by simple comparison of neighbouring values. Optionally, a subset of these
peaks can be selected by specifying conditions for a peak's properties.
Parameters
----------
dataset : |NDDataset|
A 1D NDDataset or a 2D NDdataset with `len(X.y) == 1`
height : number or ndarray or sequence, optional
Required height of peaks. Either a number, ``None``, an array matching
`x` or a 2-element sequence of the former. The first element is
always interpreted as the minimal and the second, if supplied, as the
maximal required height.
window_length : int, optional, default: 3
The length of the filter window used to interpolate the maximum. window_length must be a positive odd integer.
If set to one, the actual maximum is returned.
threshold : number or ndarray or sequence, optional
Required threshold of peaks, the vertical distance to its neighbouring
samples. Either a number, ``None``, an array matching `x` or a
2-element sequence of the former. The first element is always
interpreted as the minimal and the second, if supplied, as the maximal
required threshold.
distance : number, optional
Required minimal horizontal distance (>= 1) in samples between
neighbouring peaks. Smaller peaks are removed first until the condition
is fulfilled for all remaining peaks.
prominence : number or ndarray or sequence, optional
Required prominence of peaks. Either a number, ``None``, an array
matching `x` or a 2-element sequence of the former. The first
element is always interpreted as the minimal and the second, if
supplied, as the maximal required prominence.
width : number or ndarray or sequence, optional
Required width of peaks in samples. Either a number, ``None``, an array
matching `x` or a 2-element sequence of the former. The first
element is always interpreted as the minimal and the second, if
supplied, as the maximal required width. Floats are interpreted as width
measured along the 'x' Coord; ints are interpreted as a number of points
wlen : int or float, optional
Used for calculation of the peaks prominences, thus it is only used if
one of the arguments `prominence` or `width` is given. Floats are interpreted
as measured along the 'x' Coord; ints are interpreted as a number of points.
See argument `wlen` in `peak_prominences` of the scipy documentation for a full
description of its effects.
rel_height : float, optional,
Used for calculation of the peaks width, thus it is only used if `width`
is given. See argument `rel_height` in `peak_widths` of the scipy documentation
for a full description of its effects.
plateau_size : number or ndarray or sequence, optional
Required size of the flat top of peaks in samples. Either a number,
``None``, an array matching `x` or a 2-element sequence of the former.
The first element is always interpreted as the minimal and the second,
if supplied as the maximal required plateau size. Floats are interpreted
as measured along the 'x' Coord; ints are interpreted as a number of points.
use_coord : bool, optional
Set whether the x Coord (when it exists) should be used instead of indices
for the positions and width
Returns
-------
peaks : ndarray
Indices of peaks in `x` that satisfy all given conditions.
properties : dict
A dictionary containing properties of the returned peaks which were
calculated as intermediate results during evaluation of the specified
conditions:
* peak_heights
If `height` is given, the height of each peak in `x`.
* left_thresholds, right_thresholds
If `threshold` is given, these keys contain a peaks vertical
distance to its neighbouring samples.
* prominences, right_bases, left_bases
If `prominence` is given, these keys are accessible. See
`peak_prominences` for a description of their content.
* width_heights, left_ips, right_ips
If `width` is given, these keys are accessible. See `peak_widths`
for a description of their content.
* plateau_sizes, left_edges', 'right_edges'
If `plateau_size` is given, these keys are accessible and contain
the indices of a peak's edges (edges are still part of the
plateau) and the calculated plateau sizes.
To calculate and return properties without excluding peaks, provide the
open interval ``(None, None)`` as a value to the appropriate argument
(excluding `distance`).
Warns
-----
PeakPropertyWarning
Raised if a peak's properties have unexpected values (see
`peak_prominences` and `peak_widths`).
Warnings
--------
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
See Also
--------
find_peaks_cwt:
in scipy.signal: Find peaks using the wavelet transformation.
peak_prominences:
in scipy.signal: Directly calculate the prominence of peaks.
peak_widths:
in scipy.signal: Directly calculate the width of peaks.
Notes
-----
In the context of this function, a peak or local maximum is defined as any
sample whose two direct neighbours have a smaller amplitude. For flat peaks
(more than one sample of equal amplitude wide) the index of the middle
sample is returned (rounded down in case the number of samples is even).
For noisy signals the peak locations can be off because the noise might
change the position of local maxima. In those cases consider smoothing the
signal before searching for peaks or use other peak finding and fitting
methods (like `find_peaks_cwt`).
Some additional comments on specifying conditions:
* Almost all conditions (excluding `distance`) can be given as half-open or
closed intervals, e.g ``1`` or ``(1, None)`` defines the half-open
interval :math:`[1, \\infty]` while ``(None, 1)`` defines the interval
:math:`[-\\infty, 1]`. The open interval ``(None, None)`` can be specified
as well, which returns the matching properties without exclusion of peaks.
* The border is always included in the interval used to select valid peaks.
* For several conditions the interval borders can be specified with
arrays matching `x` in shape which enables dynamic constrains based on
the sample position.
* The conditions are evaluated in the following order: `plateau_size`,
`height`, `threshold`, `distance`, `prominence`, `width`. In most cases
this order is the fastest one because faster operations are applied first
to reduce the number of peaks that need to be evaluated later.
* While indices in `peaks` are guaranteed to be at least `distance` samples
apart, edges of flat peaks may be closer than the allowed `distance`.
* Use `wlen` to reduce the time it takes to evaluate the conditions for
`prominence` or `width` if `x` is large or has many local maxima
(see `peak_prominences`).
"""
X = dataset.squeeze()
if X.ndim > 1:
raise ValueError(
"Works only for 1D NDDataset or a 2D NDdataset with `len(X.y) <= 1`"
)
if window_length % 2 == 0:
raise ValueError("window_length must be an odd integer")
# if the following parameters are entered as floats, the coordinates are used. Else, they will
# be treated as indices as in scipy.signal.find_peak()
# transform coord (if exists) to index
if use_coord and X.coordset is not None:
step = np.abs(X.x.data[-1] - X.x.data[0]) / (len(X.x) - 1)
if distance is not None:
distance = int(round(distance / step))
if width is not None:
width = int(round(width / step))
if wlen is not None:
wlen = int(round(wlen / step))
if plateau_size is not None:
plateau_size = int(round(plateau_size / step))
data = X.data
peaks, properties = scipy.signal.find_peaks(
data,
height=height,
threshold=threshold,
distance=distance,
prominence=prominence,
width=width,
wlen=wlen,
rel_height=rel_height,
plateau_size=plateau_size,
)
# if dataset.ndim == 1:
out = X[peaks]
# else: # ndim == 2
# out = dataset[:, peaks]
if window_length > 1:
# quadratic interpolation to find the maximum
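# For a fitted parabola y = a*x**2 + b*x + c, np.polyfit returns
# coef = [a, b, c] and the vertex lies at x = -b / (2*a), hence
# x_at_max = -coef[1] / (2 * coef[0]) below.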
for i, peak in enumerate(peaks):
y = data[peak - window_length // 2 : peak + window_length // 2 + 1]
if use_coord and X.coordset is not None:
x = X.x.data[peak - window_length // 2 : peak + window_length // 2 + 1]
else:
x = range(peak - window_length // 2, peak + window_length // 2 + 1)
coef = np.polyfit(x, y, 2)
x_at_max = -coef[1] / (2 * coef[0])
y_at_max = np.poly1d(coef)(x_at_max)
if out.ndim == 1:
out[i] = y_at_max
else:
out[:, i] = y_at_max
out.x.data[i] = x_at_max
# transform back index to coord
if use_coord and X.coordset is not None:
for key in (
"left_bases",
"right_bases",
"left_edges",
"right_edges",
): # values are int type
if key in properties:
properties[key] = properties[key].astype("float64")
for i, index in enumerate(properties[key]):
properties[key][i] = X.x.data[int(index)]
for key in ("left_ips", "right_ips"): # values are float type
if key in properties:
for i, ips in enumerate(properties[key]):
# interpolate coord
floor = int(np.floor(ips))
properties[key][i] = X.x.data[floor] + (ips - floor) * (
X.x.data[floor + 1] - X.x.data[floor]
)
if "widths" in properties:
for i in range(len(properties["widths"])):
properties["widths"][i] = np.abs(
properties["left_ips"][i] - properties["right_ips"][i]
)
if "plateau_sizes" in properties:
properties["plateau_sizes"] = properties["plateau_sizes"].astype("float64")
for i in range(len(properties["plateau_sizes"])):
properties["plateau_sizes"][i] = np.abs(
properties["left_edges"][i] - properties["right_edges"][i]
)
out.name = "peaks of " + X.name
out.history[-1] = (
str(datetime.now(timezone.utc)) + f": find_peaks(): {len(peaks)} peak(s) found"
)
return out, properties
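# Illustrative (hypothetical) usage, assuming `ds` is a 1D |NDDataset| holding a spectrum:
#   peaks, properties = ds.find_peaks(height=0.1, distance=10.0, use_coord=True)
#   # `peaks` is an NDDataset of the detected maxima; with `use_coord=True`
#   # positions and widths in `properties` are expressed along the x coordinate.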
| 43.622837
| 120
| 0.627746
|
b36be15efa6153683a9a2773fd288baf98e11280
| 532
|
py
|
Python
|
src/LMR.py
|
cosmobobak/Viridithas-Chess
|
4eb9b93d4f302496c80f969dd05f8b7c227f3cf4
|
[
"MIT"
] | null | null | null |
src/LMR.py
|
cosmobobak/Viridithas-Chess
|
4eb9b93d4f302496c80f969dd05f8b7c227f3cf4
|
[
"MIT"
] | null | null | null |
src/LMR.py
|
cosmobobak/Viridithas-Chess
|
4eb9b93d4f302496c80f969dd05f8b7c227f3cf4
|
[
"MIT"
] | null | null | null |
def search_reduction_factor(lateness: int, is_check: bool, gives_check: bool, is_capt: bool, is_promo: bool, d: float) -> float:
# return 1.0
DO_NOT_REDUCE = is_capt or is_promo or d < 3
CHECK_EXTENSION = is_check or gives_check and d > 3
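# Note: by Python operator precedence this parses as is_check or (gives_check and d > 3).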
if DO_NOT_REDUCE:
return 0.6 if CHECK_EXTENSION else 1
else:
# return 1.0
if lateness <= 5:
return 1
if lateness <= 10:
# moves after move six
return 1.4
# moves after move 11
return 1.7
| 29.555556
| 128
| 0.595865
|
335f6704eacccbc1dac97e186c426ff0ea7ca50b
| 99
|
py
|
Python
|
lamp/__init__.py
|
Den4200/lamp
|
85219d207863032b64e30d07c7ea05c4a5251ad9
|
[
"MIT"
] | null | null | null |
lamp/__init__.py
|
Den4200/lamp
|
85219d207863032b64e30d07c7ea05c4a5251ad9
|
[
"MIT"
] | 3
|
2021-06-08T21:21:16.000Z
|
2022-01-13T02:33:12.000Z
|
lamp/__init__.py
|
Den4200/lamp
|
85219d207863032b64e30d07c7ea05c4a5251ad9
|
[
"MIT"
] | null | null | null |
from configparser import ConfigParser
config = ConfigParser()
config.read('sys/lamp/config.ini')
| 16.5
| 37
| 0.787879
|
2a7d0e28dd59bfe249dd976aa9b4890efcd8bce1
| 804
|
py
|
Python
|
lib/__init__.py
|
mikeyy/GT-Finder
|
b21e929f4fcfa99a9520edeaec91bbf6870e04f0
|
[
"WTFPL"
] | null | null | null |
lib/__init__.py
|
mikeyy/GT-Finder
|
b21e929f4fcfa99a9520edeaec91bbf6870e04f0
|
[
"WTFPL"
] | null | null | null |
lib/__init__.py
|
mikeyy/GT-Finder
|
b21e929f4fcfa99a9520edeaec91bbf6870e04f0
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/python
#
# This file is part of GT Finder.
#
# Copyright(c) 2016 Michael Mooney(mikeyy@mikeyy.com).
#
# This file may be licensed under the terms of of the
# GNU General Public License Version 2 (the ``GPL'').
#
# Software distributed under the License is distributed
# on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
# express or implied. See the GPL for the specific language
# governing rights and limitations.
#
# You should have received a copy of the GPL along with this
# program. If not, go to http://www.gnu.org/licenses/gpl.html
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__author__ = 'Michael Mooney'
__license__ = 'GNU General Public License Version 2' # 'Prohibited/Intellectual Property'
pass
| 32.16
| 89
| 0.74005
|
e1e6181d8ca275d8f891dee1e7a6a575d186696a
| 7,070
|
py
|
Python
|
homeassistant/components/configurator/__init__.py
|
jasperro/core
|
26d7b2164e8a971506790ae5af06f31abdf278b5
|
[
"Apache-2.0"
] | 2
|
2020-03-02T19:17:52.000Z
|
2020-03-02T19:17:53.000Z
|
homeassistant/components/configurator/__init__.py
|
jasperro/core
|
26d7b2164e8a971506790ae5af06f31abdf278b5
|
[
"Apache-2.0"
] | 6
|
2021-02-08T21:05:36.000Z
|
2022-03-12T00:54:00.000Z
|
homeassistant/components/configurator/__init__.py
|
jasperro/core
|
26d7b2164e8a971506790ae5af06f31abdf278b5
|
[
"Apache-2.0"
] | 1
|
2020-03-07T10:43:50.000Z
|
2020-03-07T10:43:50.000Z
|
"""
Support to allow pieces of code to request configuration from the user.
Initiate a request by calling the `request_config` method with a callback.
This will return a request id that has to be used for future calls.
A callback has to be provided to `request_config` which will be called when
the user has submitted configuration information.
"""
import functools as ft
import logging
from homeassistant.const import (
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
EVENT_TIME_CHANGED,
)
from homeassistant.core import callback as async_callback
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.loader import bind_hass
from homeassistant.util.async_ import run_callback_threadsafe
_LOGGER = logging.getLogger(__name__)
_KEY_INSTANCE = "configurator"
DATA_REQUESTS = "configurator_requests"
ATTR_CONFIGURE_ID = "configure_id"
ATTR_DESCRIPTION = "description"
ATTR_DESCRIPTION_IMAGE = "description_image"
ATTR_ERRORS = "errors"
ATTR_FIELDS = "fields"
ATTR_LINK_NAME = "link_name"
ATTR_LINK_URL = "link_url"
ATTR_SUBMIT_CAPTION = "submit_caption"
DOMAIN = "configurator"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SERVICE_CONFIGURE = "configure"
STATE_CONFIGURE = "configure"
STATE_CONFIGURED = "configured"
@bind_hass
@async_callback
def async_request_config(
hass,
name,
callback=None,
description=None,
description_image=None,
submit_caption=None,
fields=None,
link_name=None,
link_url=None,
entity_picture=None,
):
"""Create a new request for configuration.
Will return an ID to be used for subsequent calls.
"""
if link_name is not None and link_url is not None:
description += f"\n\n[{link_name}]({link_url})"
if description_image is not None:
description += f"\n\n"
instance = hass.data.get(_KEY_INSTANCE)
if instance is None:
instance = hass.data[_KEY_INSTANCE] = Configurator(hass)
request_id = instance.async_request_config(
name, callback, description, submit_caption, fields, entity_picture
)
if DATA_REQUESTS not in hass.data:
hass.data[DATA_REQUESTS] = {}
hass.data[DATA_REQUESTS][request_id] = instance
return request_id
@bind_hass
def request_config(hass, *args, **kwargs):
"""Create a new request for configuration.
Will return an ID to be used for subsequent calls.
"""
return run_callback_threadsafe(
hass.loop, ft.partial(async_request_config, hass, *args, **kwargs)
).result()
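# Illustrative (hypothetical) usage from an integration:
#
#   def _configuration_callback(data):
#       """Handle the fields the user submitted from the frontend."""
#       ...
#
#   request_id = configurator.request_config(
#       hass,
#       "My Device",
#       _configuration_callback,
#       description="Enter the PIN shown on the device.",
#       fields=[{"id": "pin", "name": "PIN", "type": "string"}],
#       submit_caption="Verify",
#   )
#
# Once configuration succeeds call request_done(hass, request_id); use
# notify_errors(hass, request_id, "...") to surface a problem to the user.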
@bind_hass
@async_callback
def async_notify_errors(hass, request_id, error):
"""Add errors to a config request."""
try:
hass.data[DATA_REQUESTS][request_id].async_notify_errors(request_id, error)
except KeyError:
# If request_id does not exist
pass
@bind_hass
def notify_errors(hass, request_id, error):
"""Add errors to a config request."""
return run_callback_threadsafe(
hass.loop, async_notify_errors, hass, request_id, error
).result()
@bind_hass
@async_callback
def async_request_done(hass, request_id):
"""Mark a configuration request as done."""
try:
hass.data[DATA_REQUESTS].pop(request_id).async_request_done(request_id)
except KeyError:
# If request_id does not exist
pass
@bind_hass
def request_done(hass, request_id):
"""Mark a configuration request as done."""
return run_callback_threadsafe(
hass.loop, async_request_done, hass, request_id
).result()
async def async_setup(hass, config):
"""Set up the configurator component."""
return True
class Configurator:
"""The class to keep track of current configuration requests."""
def __init__(self, hass):
"""Initialize the configurator."""
self.hass = hass
self._cur_id = 0
self._requests = {}
hass.services.async_register(
DOMAIN, SERVICE_CONFIGURE, self.async_handle_service_call
)
@async_callback
def async_request_config(
self, name, callback, description, submit_caption, fields, entity_picture
):
"""Set up a request for configuration."""
entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, name, hass=self.hass)
if fields is None:
fields = []
request_id = self._generate_unique_id()
self._requests[request_id] = (entity_id, fields, callback)
data = {
ATTR_CONFIGURE_ID: request_id,
ATTR_FIELDS: fields,
ATTR_FRIENDLY_NAME: name,
ATTR_ENTITY_PICTURE: entity_picture,
}
data.update(
{
key: value
for key, value in [
(ATTR_DESCRIPTION, description),
(ATTR_SUBMIT_CAPTION, submit_caption),
]
if value is not None
}
)
self.hass.states.async_set(entity_id, STATE_CONFIGURE, data)
return request_id
@async_callback
def async_notify_errors(self, request_id, error):
"""Update the state with errors."""
if not self._validate_request_id(request_id):
return
entity_id = self._requests[request_id][0]
state = self.hass.states.get(entity_id)
new_data = dict(state.attributes)
new_data[ATTR_ERRORS] = error
self.hass.states.async_set(entity_id, STATE_CONFIGURE, new_data)
@async_callback
def async_request_done(self, request_id):
"""Remove the configuration request."""
if not self._validate_request_id(request_id):
return
entity_id = self._requests.pop(request_id)[0]
# If we remove the state right away, it will not be included with
# the result of the service call (current design limitation).
# Instead, we will set it to configured to give as feedback but delete
# it shortly after so that it is deleted when the client updates.
self.hass.states.async_set(entity_id, STATE_CONFIGURED)
def deferred_remove(event):
"""Remove the request state."""
self.hass.states.async_remove(entity_id)
self.hass.bus.async_listen_once(EVENT_TIME_CHANGED, deferred_remove)
async def async_handle_service_call(self, call):
"""Handle a configure service call."""
request_id = call.data.get(ATTR_CONFIGURE_ID)
if not self._validate_request_id(request_id):
return
# pylint: disable=unused-variable
entity_id, fields, callback = self._requests[request_id]
# field validation goes here?
if callback:
await self.hass.async_add_job(callback, call.data.get(ATTR_FIELDS, {}))
def _generate_unique_id(self):
"""Generate a unique configurator ID."""
self._cur_id += 1
return f"{id(self)}-{self._cur_id}"
def _validate_request_id(self, request_id):
"""Validate that the request belongs to this instance."""
return request_id in self._requests
| 28.857143
| 84
| 0.678359
|
629489b5582efe1ecdeec49cde30977c3fa37aa4
| 813
|
py
|
Python
|
secondProject/driveTest/driveTest/urls.py
|
loic9654/Djangodev
|
2babb235d68f508c64171a146be8483009dea7f7
|
[
"Apache-2.0"
] | null | null | null |
secondProject/driveTest/driveTest/urls.py
|
loic9654/Djangodev
|
2babb235d68f508c64171a146be8483009dea7f7
|
[
"Apache-2.0"
] | null | null | null |
secondProject/driveTest/driveTest/urls.py
|
loic9654/Djangodev
|
2babb235d68f508c64171a146be8483009dea7f7
|
[
"Apache-2.0"
] | null | null | null |
"""driveTest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('projects', include('driveManager.urls')),
path('admin/', admin.site.urls),
]
| 35.347826
| 77
| 0.707257
|
e2e216fc55a1438ef86973556380eb127fb7e0a0
| 8,190
|
py
|
Python
|
backend/src/baserow/contrib/database/application_types.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/application_types.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/application_types.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.core.management.color import no_style
from django.db import connections
from django.urls import path, include
from baserow.contrib.database.fields.registries import field_type_registry
from baserow.contrib.database.views.registries import view_type_registry
from baserow.core.registries import ApplicationType
from .api.serializers import DatabaseSerializer
from .models import Database, Table
from baserow.core.trash.handler import TrashHandler
class DatabaseApplicationType(ApplicationType):
type = "database"
model_class = Database
instance_serializer_class = DatabaseSerializer
def pre_delete(self, database):
"""
When a database is deleted we must also delete the related tables via the table
handler.
"""
database_tables = (
database.table_set(manager="objects_and_trash")
.all()
.select_related("database__group")
)
for table in database_tables:
TrashHandler.permanently_delete(table)
def get_api_urls(self):
from .api import urls as api_urls
return [
path("database/", include(api_urls, namespace=self.type)),
]
def export_serialized(self, database, files_zip, storage):
"""
Exports the database application type to a serialized format that can later
be imported via the `import_serialized` method.
"""
tables = database.table_set.all().prefetch_related(
"field_set",
"view_set",
"view_set__viewfilter_set",
"view_set__viewsort_set",
)
serialized_tables = []
for table in tables:
fields = table.field_set.all()
serialized_fields = []
for f in fields:
field = f.specific
field_type = field_type_registry.get_by_model(field)
serialized_fields.append(field_type.export_serialized(field))
serialized_views = []
for v in table.view_set.all():
view = v.specific
view_type = view_type_registry.get_by_model(view)
serialized_views.append(view_type.export_serialized(view))
model = table.get_model(fields=fields)
serialized_rows = []
table_cache = {}
for row in model.objects.all():
serialized_row = {"id": row.id, "order": str(row.order)}
for field_object in model._field_objects.values():
field_name = field_object["name"]
field_type = field_object["type"]
serialized_row[field_name] = field_type.get_export_serialized_value(
row, field_name, table_cache, files_zip, storage
)
serialized_rows.append(serialized_row)
serialized_tables.append(
{
"id": table.id,
"name": table.name,
"order": table.order,
"fields": serialized_fields,
"views": serialized_views,
"rows": serialized_rows,
}
)
serialized = super().export_serialized(database, files_zip, storage)
serialized["tables"] = serialized_tables
return serialized
def import_serialized(
self, group, serialized_values, id_mapping, files_zip, storage
):
"""
Imports a database application exported by the `export_serialized` method.
"""
if "database_tables" not in id_mapping:
id_mapping["database_tables"] = {}
tables = serialized_values.pop("tables")
database = super().import_serialized(
group, serialized_values, id_mapping, files_zip, storage
)
connection = connections[settings.USER_TABLE_DATABASE]
# First, we want to create all the table instances because it could be that
# field or view properties depend on the existence of a table.
for table in tables:
table_object = Table.objects.create(
database=database,
name=table["name"],
order=table["order"],
)
id_mapping["database_tables"][table["id"]] = table_object.id
table["_object"] = table_object
table["_field_objects"] = []
# Because view properties might depend on fields, we first want to create all
# the fields.
for table in tables:
for field in table["fields"]:
field_type = field_type_registry.get(field["type"])
field_object = field_type.import_serialized(
table["_object"], field, id_mapping
)
if field_object:
table["_field_objects"].append(field_object)
# Now that all the tables and fields exist, we can create the views and create
# the table schema in the database.
for table in tables:
for view in table["views"]:
view_type = view_type_registry.get(view["type"])
view_type.import_serialized(table["_object"], view, id_mapping)
# We don't need to create all the fields individually because the schema
# editor can handle the creation of the table schema in one go.
with connection.schema_editor() as schema_editor:
model = table["_object"].get_model(
fields=table["_field_objects"], field_ids=[]
)
schema_editor.create_model(model)
# Now that everything is in place we can start filling the table with the rows
# in an efficient manner by using the bulk_create functionality.
for table in tables:
model = table["_object"].get_model(
fields=table["_field_objects"], field_ids=[]
)
field_ids = [field_object.id for field_object in table["_field_objects"]]
rows_to_be_inserted = []
for row in table["rows"]:
row_object = model(id=row["id"], order=row["order"])
for field in table["fields"]:
field_type = field_type_registry.get(field["type"])
new_field_id = id_mapping["database_fields"][field["id"]]
# If the new field id is not present in the field_ids then we don't
# want to set that value on the row. This is because upon creation
# of the field there could be a deliberate choice not to populate
# that field. This is for example the case with the related field
# of the `link_row` field, which would result in duplicates if we
# populated it.
if new_field_id in field_ids:
field_type.set_import_serialized_value(
row_object,
f'field_{id_mapping["database_fields"][field["id"]]}',
row[f'field_{field["id"]}'],
id_mapping,
files_zip,
storage,
)
rows_to_be_inserted.append(row_object)
# We want to insert the rows in bulk because there could potentially be
# hundreds of thousands of rows in there and this will result in better
# performance.
model.objects.bulk_create(rows_to_be_inserted)
# When the rows are inserted we keep the old ids that were provided, so the
# auto increment sequence is still set at `1`. It needs to be reset to the
# maximum value because otherwise creating a new row could fail later.
connection = connections[settings.USER_TABLE_DATABASE]
sequence_sql = connection.ops.sequence_reset_sql(no_style(), [model])
with connection.cursor() as cursor:
cursor.execute(sequence_sql[0])
return database
| 41.155779
| 88
| 0.586569
|
13abbb037693069a2f0f2d49c17c92c0be2f77e1
| 172
|
py
|
Python
|
_1327/documents/templatetags/filename.py
|
julkw/1327
|
480521256670b9efc787528ab7cdaa4b3305f422
|
[
"MIT"
] | 10
|
2015-04-13T18:54:46.000Z
|
2018-11-07T10:42:03.000Z
|
_1327/documents/templatetags/filename.py
|
julkw/1327
|
480521256670b9efc787528ab7cdaa4b3305f422
|
[
"MIT"
] | 530
|
2015-02-16T19:26:44.000Z
|
2018-12-03T18:45:44.000Z
|
_1327/documents/templatetags/filename.py
|
julkw/1327
|
480521256670b9efc787528ab7cdaa4b3305f422
|
[
"MIT"
] | 15
|
2019-07-21T08:45:09.000Z
|
2022-01-26T13:28:03.000Z
|
import os
from django import template
register = template.Library()
@register.filter
def filename(string):
delimiter = os.sep
return string.split(delimiter)[-1]
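# Illustrative template usage (a sketch, not part of the original app; the
# `document` context variable and its `file.name` attribute are assumptions):
#
#   {% load filename %}
#   {{ document.file.name|filename }}   {# renders only the basename, e.g. "report.pdf" #}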
| 15.636364
| 38
| 0.738372
|
8e32fc3a747a96a73c9da60876bfa6ff04412af9
| 3,358
|
py
|
Python
|
test/integration/035_changing_relation_type_test/test_changing_relation_type.py
|
cwkrebs/dbt
|
0b135772d2db2549225365eeeb465b5316930145
|
[
"Apache-2.0"
] | 1
|
2022-01-09T19:33:25.000Z
|
2022-01-09T19:33:25.000Z
|
test/integration/035_changing_relation_type_test/test_changing_relation_type.py
|
cwkrebs/dbt
|
0b135772d2db2549225365eeeb465b5316930145
|
[
"Apache-2.0"
] | 1
|
2019-02-14T20:10:46.000Z
|
2019-02-19T13:06:38.000Z
|
test/integration/035_changing_relation_type_test/test_changing_relation_type.py
|
cwkrebs/dbt
|
0b135772d2db2549225365eeeb465b5316930145
|
[
"Apache-2.0"
] | null | null | null |
from test.integration.base import DBTIntegrationTest, use_profile
class TestChangingRelationType(DBTIntegrationTest):
@property
def schema(self):
return "changing_relation_type_035"
@staticmethod
def dir(path):
return "test/integration/035_changing_relation_type_test/" + path.lstrip("/")
@property
def models(self):
return self.dir("models")
def swap_types_and_test(self):
# test that dbt correctly handles switching between materializations
# that create tables and views.
results = self.run_dbt(['run', '--vars', 'materialized: view'])
self.assertEquals(results[0].node['config']['materialized'], 'view')
self.assertEqual(len(results), 1)
results = self.run_dbt(['run', '--vars', 'materialized: table'])
self.assertEquals(results[0].node['config']['materialized'], 'table')
self.assertEqual(len(results), 1)
results = self.run_dbt(['run', '--vars', 'materialized: view'])
self.assertEquals(results[0].node['config']['materialized'], 'view')
self.assertEqual(len(results), 1)
results = self.run_dbt(['run', '--vars', 'materialized: incremental'])
self.assertEquals(results[0].node['config']['materialized'], 'incremental')
self.assertEqual(len(results), 1)
results = self.run_dbt(['run', '--vars', 'materialized: view'])
self.assertEquals(results[0].node['config']['materialized'], 'view')
self.assertEqual(len(results), 1)
@use_profile("postgres")
def test__postgres__switch_materialization(self):
self.swap_types_and_test()
@use_profile("snowflake")
def test__snowflake__switch_materialization(self):
self.swap_types_and_test()
@use_profile("redshift")
def test__redshift__switch_materialization(self):
self.swap_types_and_test()
@use_profile("bigquery")
def test__bigquery__switch_materialization(self):
# BQ has a weird check that prevents the dropping of tables in the view materialization
# if --full-refresh is not provided. This is to prevent the clobbering of a date-sharded
# table with a view if a model config is accidentally changed. We should probably remove that check
# and then remove these bq-specific tests
results = self.run_dbt(['run', '--vars', 'materialized: view'])
self.assertEquals(results[0].node['config']['materialized'], 'view')
self.assertEqual(len(results), 1)
results = self.run_dbt(['run', '--vars', 'materialized: table'])
self.assertEquals(results[0].node['config']['materialized'], 'table')
self.assertEqual(len(results), 1)
results = self.run_dbt(['run', '--vars', 'materialized: view', "--full-refresh"])
self.assertEquals(results[0].node['config']['materialized'], 'view')
self.assertEqual(len(results), 1)
results = self.run_dbt(['run', '--vars', 'materialized: incremental'])
self.assertEquals(results[0].node['config']['materialized'], 'incremental')
self.assertEqual(len(results), 1)
results = self.run_dbt(['run', '--vars', 'materialized: view', "--full-refresh"])
self.assertEquals(results[0].node['config']['materialized'], 'view')
self.assertEqual(len(results), 1)
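# For reference, each self.run_dbt([...]) call above corresponds to a CLI
# invocation of the form below (a sketch; project/profile flags are omitted):
#
#   dbt run --vars 'materialized: view'
#   dbt run --vars 'materialized: table'
#   dbt run --vars 'materialized: incremental'
#   dbt run --vars 'materialized: view' --full-refresh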
| 41.975
| 105
| 0.656939
|
4a4b3ddc8338d75ccd898432cc400e4924ad4a96
| 628
|
py
|
Python
|
rateApp/migrations/0004_auto_20210918_2201.py
|
enockabere/Project_Rating
|
b50d1474927b94d3d08a708eaff026e91ec9f950
|
[
"Unlicense"
] | null | null | null |
rateApp/migrations/0004_auto_20210918_2201.py
|
enockabere/Project_Rating
|
b50d1474927b94d3d08a708eaff026e91ec9f950
|
[
"Unlicense"
] | null | null | null |
rateApp/migrations/0004_auto_20210918_2201.py
|
enockabere/Project_Rating
|
b50d1474927b94d3d08a708eaff026e91ec9f950
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-09-18 22:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('rateApp', '0003_remove_profile_projects'),
]
operations = [
migrations.RemoveField(
model_name='project',
name='project_owner',
),
migrations.AddField(
model_name='profile',
name='projects',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='rateApp.project'),
),
]
| 26.166667
| 142
| 0.627389
|
b6c9d3081e52e93cbdacae81b7cc8bf85d6ef117
| 17,282
|
py
|
Python
|
goetia/processors.py
|
camillescott/boink
|
db75dc0d87126c5ad20c35405699d89153f109a8
|
[
"MIT"
] | 3
|
2019-03-10T02:30:16.000Z
|
2020-02-07T20:11:26.000Z
|
goetia/processors.py
|
camillescott/boink
|
db75dc0d87126c5ad20c35405699d89153f109a8
|
[
"MIT"
] | 6
|
2018-04-11T02:01:18.000Z
|
2020-01-31T14:21:55.000Z
|
goetia/processors.py
|
camillescott/boink
|
db75dc0d87126c5ad20c35405699d89153f109a8
|
[
"MIT"
] | 2
|
2019-03-09T19:15:08.000Z
|
2019-04-18T19:27:08.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2021
# File : processors.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 05.11.2021
import atexit
from collections import OrderedDict, defaultdict
from enum import Enum, unique as unique_enum
import functools
import inspect
import json
import signal
import sys
import time
import traceback
from typing import Any
import curio
from goetia import libgoetia
from goetia.messages import (
AllMessages,
EndStream,
Error,
Interval,
SampleFinished,
SampleStarted,
)
from goetia.utils import is_iterable, time_iterable, Counter
DEFAULT_SOCKET = '/tmp/goetia.sock'
DEFAULT_INTERVAL = libgoetia.IntervalCounter.DEFAULT_INTERVAL
class QueueManager:
def __init__(self, q: curio.UniversalQueue, name: str):
assert type(q) is curio.UniversalQueue
self.q = q
self.name = name
self.subscribers = set()
self.subscriber_names = {}
def subscribe(self, q: curio.UniversalQueue, name: str):
if q not in self.subscribers:
self.subscribers.add(q)
self.subscriber_names[q] = name
def unsubscribe(self, q: curio.UniversalQueue) -> None:
try:
self.subscribers.remove(q)
del self.subscriber_names[q]
except KeyError:
pass
async def kill(self) -> None:
await self.q.put(None)
async def dispatch(self) -> None:
while True:
msg = await self.q.get()
for sub_q in self.subscribers:
await sub_q.put(msg)
await self.q.task_done()
if msg is None:
break
class MessageHandler:
def __init__(self, name, subscription):
self.subscription = subscription
self.name = name
self.handlers = defaultdict(list)
async def task(self, error_handler = None):
try:
msg_q = curio.Queue()
self.subscription.subscribe(msg_q, self.name)
while True:
msg = await msg_q.get()
if msg is None:
await msg_q.task_done()
break
for callback, args, kwargs in self.handlers[type(msg)]:
if inspect.iscoroutinefunction(callback):
await callback(msg, *args, **kwargs)
else:
callback(msg, *args, **kwargs)
for callback, args, kwargs in self.handlers[AllMessages]:
if inspect.iscoroutinefunction(callback):
await callback(msg, *args, **kwargs)
else:
callback(msg, *args, **kwargs)
await msg_q.task_done()
except curio.CancelledError:
raise
except Exception as e:
if error_handler is not None:
error_handler(e)
else:
raise
else:
self.subscription.unsubscribe(msg_q)
def on_message(self, msg_class, callback, *args, **kwargs):
assert type(msg_class) is type
self.handlers[msg_class].append((callback, args, kwargs))
def on_messages(self, msg_classes, callback, *args, **kwargs):
for msg_class in msg_classes:
self.handlers[msg_class].append((callback, args, kwargs))
@unique_enum
class RunState(Enum):
READY = 0
RUNNING = 1
SIGINT = 2
STOP_SATURATED = 3
STOP_ERROR = 4
STOP = 5
class AsyncSequenceProcessor:
def __init__(self, processor,
sample_iter,
echo = None,
broadcast_socket = None):
"""Manages advancing through a concrete FileProcessor
subclass asynchronously. The processor pushes Interval
updates on to the `worker_q`, which are also forwarded
to an `events_q`. Additional async tasks can subscribe to
either queue; the `events_q` is considered the outward-facing
point.
`sample_iter` should conform to that produced by
`goetia.processing.iter_fastx_inputs`.
Args:
processor (libgoetia.InserterProcessor<T>): Processor to manage.
sample_iter (iterator): Iterator over pairs of or single samples.
echo (bool): Whether to echo `events_q` to the terminal.
broadcast_socket (str, optional): AF_UNIX socket to broadcast
the events queue on.
"""
self.worker_q = curio.UniversalQueue()
self.worker_subs = QueueManager(self.worker_q, 'worker_q')
self.events_q = curio.UniversalQueue()
self.events_subs = QueueManager(self.events_q, 'events_q')
self.channels = OrderedDict()
self.channels[self.worker_subs.name] = self.worker_subs
self.channels[self.events_subs.name] = self.events_subs
# We want everything from the worker q to also end
# up on the events q
self.subscribe('worker_q', self.events_q, 'events_q')
self.listener_tasks = []
self.processor = processor
self.sample_iter = sample_iter
self.run_echo = echo is not None
self.echo_file = '/dev/stderr' if echo is True else echo
self.state = RunState.READY
self.processed = set()
self.seconds_elapsed = 0
#super().__init__(broadcast_socket)
def get_channel(self, channel: str) -> QueueManager:
"""Query for the given channel name.
Args:
channel (str): The channel name.
Returns:
QueueManager: Manager for the channel.
"""
try:
return self.channels[channel]
except KeyError:
print(f'Requested invalid channel: "{channel}" does not exist.', file=sys.stderr)
raise
def subscribe(self, channel_name: str,
collection_q: curio.UniversalQueue,
subscriber_name: str) -> None:
"""Subscribe a queue of the given name to a channel.
Args:
channel_name (str): Name of the channel.
collection_q (curio.Queue): The queue to collect on.
subscriber_name (str): Name of the subscriber.
"""
self.get_channel(channel_name).subscribe(collection_q, subscriber_name)
def unsubscribe(self, channel_name: str,
collection_q: curio.UniversalQueue) -> None:
"""Stop receving data from the named channel on the given queue.
Args:
channel_name (str): Name of the channel.
collection_q (curio.Queue): Queue object to remove.
"""
self.get_channel(channel_name).unsubscribe(collection_q)
def add_listener(self, channel_name: str,
subscriber_name: str) -> MessageHandler:
channel = self.get_channel(channel_name)
listener = MessageHandler(subscriber_name, channel)
self.listener_tasks.append(listener.task)
return listener
def worker(self) -> None:
stream_time, n_seqs = 0, 0
worker_start_time = time.perf_counter()
for (sample, name), sample_start_time, _ in time_iterable(self.sample_iter):
self.worker_q.put(SampleStarted(sample_name=name, # type: ignore
file_names=sample,
t=stream_time,
seconds_elapsed_total=time.perf_counter() - worker_start_time,
sequence=n_seqs))
try:
for (n_seqs, stream_time, n_skipped), \
interval_start_time, interval_elapsed_time in \
time_iterable(self.processor.chunked_process(*sample)):
if self.state is RunState.STOP_SATURATED:
# Saturation is tripped externally: just return immediately.
return
if self.state is RunState.SIGINT:
# If we're interrupted, inform our listeners that something went wrong.
self.worker_q.put(Error(t=stream_time, # type: ignore
sequence=n_seqs,
sample_name=name,
file_names=sample,
error='Process terminated (SIGINT).'))
return
self.worker_q.put(Interval(t=stream_time, # type: ignore
sequence=n_seqs,
sample_name=name,
seconds_elapsed_interval=interval_elapsed_time,
seconds_elapsed_sample=time.perf_counter() - sample_start_time,
seconds_elapsed_total=time.perf_counter() - worker_start_time,
start_time_seconds=interval_start_time,
file_names=sample))
self.processed.add(tuple(sample))
self.worker_q.put(SampleFinished(t=stream_time, # type: ignore
sequence=n_seqs,
sample_name=name,
seconds_elapsed_sample=time.perf_counter() - sample_start_time,
seconds_elapsed_total=time.perf_counter() - worker_start_time,
file_names=sample))
except Exception as e:
self.worker_q.put(Error(t=stream_time, # type: ignore
sequence=n_seqs,
sample_name=name,
file_names=sample,
error="".join(traceback.format_tb(e.__traceback__))))
return
finally:
self.worker_q.put(EndStream(t=stream_time, # type: ignore
seconds_elapsed_total=time.perf_counter() - worker_start_time,
sequence=n_seqs))
#def on_error(self, exception):
# self.worker_q.put(Error(t=self.processor.time_elapsed(),
# sequence=n_seqs,
# sample_name=name,
# error=f'At sequence {self.processor.n_sequences()}: {str(e)}',
# file_names=sample))
# self.state = RunState.STOP_ERROR
async def start(self, extra_tasks = None) -> None:
try:
async with curio.TaskGroup() as g:
# each channel has its own dispatch task
# to send data to its subscribers
for channel_name, channel in self.channels.items():
await g.spawn(channel.dispatch)
# start up AF_UNIX broadcaster if desired
# if self.broadcast_socket is not None:
# await g.spawn(self.broadcaster)
if self.run_echo:
listener = self.add_listener('events_q', 'echo')
async def echo(msg):
mode = 'w' if self.echo_file in ['/dev/stdout', '/dev/stderr'] else 'a'
async with curio.aopen(self.echo_file, mode) as fp:
await fp.write(f'{msg.to_yaml()}\n')
listener.on_message(AllMessages,
echo)
# spawn tasks from listener callbacks
for task in self.listener_tasks:
await g.spawn(task)
# spawn extra tasks to run
if extra_tasks is not None:
for task in extra_tasks:
await g.spawn(task)
# and now we spawn the worker to iterate through
# the processor and wait for it to finish
self.state = RunState.RUNNING
signal.signal(signal.SIGINT, lambda signo, frame: self.interrupt())
# give just a bit of time for the listeners to all spin up
await curio.sleep(0.05)
# then spawn the worker
w = await g.spawn_thread(self.worker)
await w.join()
await curio.sleep(0.05)
await self.worker_subs.kill()
except Exception as e:
print(e, file=sys.stderr)
def stop(self) -> None:
self.state = RunState.STOP
def interrupt(self) -> None:
self.state = RunState.SIGINT
def saturate(self) -> None:
self.state = RunState.STOP_SATURATED
def every_n_intervals(func, n=1):
poller = libgoetia.IntervalCounter(n)
@functools.wraps(func)
async def wrapped(msg, *args, **kwargs):
assert isinstance(msg, Interval)
if poller.poll():
await func(msg, *args, **kwargs)
return wrapped
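# Usage sketch (assumptions: `listener` is a MessageHandler returned by
# AsyncSequenceProcessor.add_listener and `report` is a user-supplied
# coroutine; neither name exists in this module):
#
#   async def report(msg):
#       print(msg.t, msg.sequence)
#
#   # fire `report` only on every 10th Interval message
#   listener.on_message(Interval, every_n_intervals(report, n=10))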
def every_fib_intervals(func):
def fibs():
a = 1
b = 1
while True:
yield a
a, b = b, a + b
gen = fibs()
fib = next(gen)
ticks = 1
@functools.wraps(func)
async def wrapped(msg, *args, **kwargs):
nonlocal fib, ticks
assert isinstance(msg, Interval)
if ticks >= fib:
await func(msg, *args, **kwargs)
ticks = 1
fib = next(gen)
else:
ticks += 1
return wrapped
def every_exp_intervals(func, r=.08):
def exp_growth(start, r):
t = 0
while True:
yield start * (1.0 + r) ** t
t += 1
gen = exp_growth(1, r)
mark = next(gen)
ticks = 1
@functools.wraps(func)
async def wrapped(msg, *args, **kwargs):
nonlocal mark, ticks
assert isinstance(msg, Interval)
if ticks >= int(mark):
await func(msg, *args, **kwargs)
ticks = 1
mark = next(gen)
else:
ticks += 1
return wrapped
class JSONStreamWriter:
def __init__(self, filename: str):
'''Stream-write JSON data to a file.
Writes a stream of JSON objects to a file. The top-level
element is always a list; list items can be any valid JSON
type.
Args:
filename: Path of the target file to write to.
'''
self.filename = filename
self.n_writes = 0
with open(self.filename, 'w') as fp:
fp.write('[')
atexit.register(self.close)
def close(self):
with open(self.filename, 'a') as fp:
fp.write(']')
atexit.unregister(self.close)
@staticmethod
def _dumps_data(data: Any, expand: bool = True):
buf = ''
if isinstance(data, str):
# assume already valid JSON object
buf = data
elif expand and is_iterable(data) and not isinstance(data, dict):
# extend the top level list rather than
# adding the iterable as an item
buf = ','.join((json.dumps(item) for item in data))
else:
buf = json.dumps(data)
return buf
def write(self, data: Any, expand: bool = True):
'''Write the given data as a JSON element to the stream.
Strings will be written assuming they are already valid JSON;
this could result in malformed JSON, so care must be taken.
Other data types are passed to json.dumps for serialization.
Args:
data: Data to coerce to JSON.
expand: If True, iterables will be expanded into the stream
rather than appended as a single item.
'''
buf = ''
with open(self.filename, 'a') as fp:
if self.n_writes != 0:
fp.write(',\n')
fp.write(self._dumps_data(data, expand=expand))
self.n_writes += 1
class AsyncJSONStreamWriter(JSONStreamWriter):
async def write(self, data: Any, expand: bool = True):
'''Write the given data as a JSON element to the stream.
Strings will be written assuming they are already valid JSON;
this could result in malformed JSON, so care must be taken.
Other data types are passed to json.dumps for serialization.
Args:
data: Data to coerce to JSON.
expand: If True, iterables will be expanded into the stream
rather than appended as a single item.
'''
buf = ''
async with curio.aopen(self.filename, 'a') as fp:
if self.n_writes != 0:
await fp.write(',\n')
await fp.write(self._dumps_data(data, expand=expand))
self.n_writes += 1
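# Minimal usage sketch for JSONStreamWriter (the file name 'results.json' is
# an assumption made for illustration). Each write() appends one or more
# elements to the top-level JSON list; close() writes the closing bracket and
# is also registered via atexit:
#
#   writer = JSONStreamWriter('results.json')
#   writer.write({'sample': 'A', 'n_seqs': 1000})   # first list element
#   writer.write([{'t': 1}, {'t': 2}])              # expanded into two elements
#   writer.close()                                   # file now holds a valid JSON array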
| 34.633267
| 112
| 0.538132
|
c8c57ab1f69973f8de3e640b40aaf44adf26f6f4
| 14,174
|
py
|
Python
|
wye/organisations/views.py
|
curioswati-zz/wye
|
4f370a5aeafe81d90aa44adcec50c1f3eb40004e
|
[
"MIT"
] | 1
|
2020-12-22T10:24:31.000Z
|
2020-12-22T10:24:31.000Z
|
wye/organisations/views.py
|
curioswati-zz/wye
|
4f370a5aeafe81d90aa44adcec50c1f3eb40004e
|
[
"MIT"
] | null | null | null |
wye/organisations/views.py
|
curioswati-zz/wye
|
4f370a5aeafe81d90aa44adcec50c1f3eb40004e
|
[
"MIT"
] | null | null | null |
import uuid
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.core.urlresolvers import reverse_lazy
from django.http import Http404
from django.shortcuts import redirect, render
from django.template import Context, loader
from django.views import generic
from braces import views
from django.http.response import HttpResponseRedirect
from wye.base.emailer_html import send_email_to_id, send_email_to_list
from wye.profiles.models import Profile, UserType
from wye.regions.models import RegionalLead
from .forms import (
OrganisationForm, OrganisationMemberAddForm,
UserRegistrationForm
)
from .models import Organisation, User
class OrganisationList(views.LoginRequiredMixin, generic.ListView):
model = Organisation
template_name = 'organisation/list.html'
def dispatch(self, request, *args, **kwargs):
user_profile = Profile.objects.get(
user__id=self.request.user.id)
if not user_profile.is_profile_filled:
return redirect('profiles:profile-edit', slug=request.user.username)
return super(OrganisationList, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return Organisation.objects.filter(active=True)
def get_context_data(self, *args, **kwargs):
context = super(OrganisationList, self).get_context_data(
*args, **kwargs)
if Profile.is_organiser(self.request.user):
context['org_created_list'] = self.get_queryset().filter(
created_by=self.request.user)
context['org_belongs_list'] = self.get_queryset().exclude(
created_by=self.request.user).filter(
user=self.request.user)
elif Profile.is_regional_lead(self.request.user):
regions = RegionalLead.objects.filter(leads=self.request.user)
context['regional_org_list'] = self.get_queryset().filter(
location__id__in=[x.location.id for x in regions])
context['user'] = self.request.user
# This part needs improvement: a user can be both a tutor and a regional
# lead, so we cannot simply set context['is_not_tutor'] = False and instead
# have to verify the roles explicitly as below.
if (Profile.is_regional_lead(self.request.user) or
Profile.is_organiser(self.request.user) or
Profile.is_admin(self.request.user)):
context['is_not_tutor'] = True
return context
class OrganisationCreate(views.LoginRequiredMixin, generic.CreateView):
model = Organisation
form_class = OrganisationForm
template_name = 'organisation/create.html'
success_url = reverse_lazy('organisations:organisation_list')
def dispatch(self, request, *args, **kwargs):
user_profile = Profile.objects.get(
user__id=self.request.user.id)
if not user_profile.is_profile_filled:
return redirect('profiles:profile-edit', slug=request.user.username)
if not user_profile.can_create_organisation:
msg = '''Exceeded the allowed number of organisation registrations.
Use the contact us form to connect to the co-ordinators'''
return render(request, 'error.html', {'message': msg})
return super(OrganisationCreate, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
form = OrganisationForm(data=request.POST)
if form.is_valid():
form.instance.modified_by = request.user
form.instance.created_by = request.user
form.instance.save()
form.instance.user.add(request.user)
form.instance.save()
user_profile = Profile.objects.get(
user__id=self.request.user.id)
if not ('poc' in user_profile.get_user_type):
poc_type = UserType.objects.get(slug='poc')
user_profile.usertype.add(poc_type)
user_profile.save()
host = '{}://{}'.format(settings.SITE_PROTOCOL,
request.META['HTTP_HOST'])
email_context = Context({
'full_name': '%s %s' % (request.user.first_name,
request.user.last_name),
'org_id': form.instance.id,
'host': host
})
subject = "%s organisation for region %s is created" % (
form.instance.name, form.instance.location.name)
email_body = loader.get_template(
'email_messages/organisation/new.html').render(email_context)
text_body = loader.get_template(
'email_messages/organisation/new.txt').render(email_context)
regional_lead = Profile.objects.filter(
interested_locations=form.instance.location,
usertype__slug='lead').values_list('user__email', flat=True)
send_email_to_id(subject,
body=email_body,
email_id=request.user.email,
text_body=text_body)
send_email_to_list(subject,
body=email_body,
users_list=regional_lead,
text_body=text_body)
return HttpResponseRedirect(self.success_url)
else:
return render(request, self.template_name, {'form': form})
class OrganisationDetail(views.LoginRequiredMixin, generic.DetailView):
model = Organisation
template_name = 'organisation/detail.html'
success_url = reverse_lazy('organisations:organisation_list')
def get_queryset(self):
return Organisation.objects.filter(
user=self.request.user,
id=self.kwargs['pk'])
class OrganisationUpdate(views.LoginRequiredMixin, generic.UpdateView):
model = Organisation
form_class = OrganisationForm
template_name = 'organisation/edit.html'
success_url = reverse_lazy('organisations:organisation_list')
def get_object(self, queryset=None):
org = Organisation.objects.get(user=self.request.user, id=self.kwargs['pk'])
if org.created_by == self.request.user:
return Organisation.objects.get(user=self.request.user, id=self.kwargs['pk'])
else:
self.template_name = "403.html"
class OrganisationMemberAdd(views.LoginRequiredMixin, generic.UpdateView):
model = Organisation
form_class = OrganisationMemberAddForm
template_name = 'organisation/member-add.html'
success_url = reverse_lazy('organisations:organisation_list')
def get_username(self, email):
"""
Returns a UUID-based 'random' and unique username.
This is required data for user models with a username field.
"""
uuid_str = str(uuid.uuid4())
username = email.split("@")[0]
uuid_str = uuid_str[:30 - len(username)]
return username + uuid_str
def get_token(self, user, **kwargs):
"""Returns a unique token for the given user"""
return PasswordResetTokenGenerator().make_token(user)
def get_urls(self):
return patterns('',
url(r'^(?P<user_id>[\d]+)-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/',
view=self.activate_view, name="invitation_register")
)
def post(self, request, *args, **kwargs):
form = OrganisationMemberAddForm(data=request.POST)
if form.is_valid():
existing_user = form.cleaned_data['existing_user']
new_user = form.cleaned_data['new_user']
org = Organisation.objects.get(id=self.kwargs['pk'])
host = '{}://{}'.format(settings.SITE_PROTOCOL,
request.META['HTTP_HOST'])
context = {
'full_name': '%s %s' % (request.user.first_name,
request.user.last_name),
'org_name': org.name,
'host': host
}
if existing_user:
# add user to organisation
user = existing_user
org.user.add(user)
org.save()
# set the new member's name in the email context
context['new_member_name'] = '%s %s' % (user.first_name,
user.last_name)
email_context = Context(context)
# send mail to user being added
subject = "You are added in %s organisation" % (
org.location.name)
email_body = loader.get_template(
'email_messages/organisation/to_new_member_existing.html').render(
email_context)
text_body = loader.get_template(
'email_messages/organisation/to_new_member_existing.txt').render(email_context)
send_email_to_id(subject,
body=email_body,
email_id=user.email,
text_body=text_body)
elif new_user:
# generate a random password
random_password = User.objects.make_random_password()
# create a user with the email from form
user = User(username=self.get_username(new_user),
email=new_user,
password=random_password)
# user is inactive initially
user.is_active = False
user.save()
# add the user to organisation
org.user.add(user.id)
org.save()
# set the email context; the token will be used to generate a unique verification
# link
token = self.get_token(user)
context['new_member_name'] = '%s' % (user.email)
context['token'] = token
context['user'] = user
email_context = Context(context)
# build the email subject and body
subject = "[Python Express]:You are added in %s organisation" % (
org.location.name)
email_body = loader.get_template(
'email_messages/organisation/to_new_member.html').render(email_context)
text_body = loader.get_template(
'email_messages/organisation/to_new_member.txt').render(email_context)
# send the mail to new user
send_email_to_id(subject,
body=email_body,
email_id=new_user,
text_body=text_body)
# These mails will be sent in both cases.
subject = "user %s %s added in %s organisation" % (
user.first_name, user.last_name, org.location.name)
email_body = loader.get_template(
'email_messages/organisation/member_addition_to_user.html').render(
email_context)
text_body = loader.get_template(
'email_messages/organisation/member_addition_to_user.txt').render(
email_context)
# send mail to the user who added the new member
send_email_to_id(subject,
body=email_body,
email_id=request.user.email,
text_body=text_body)
regional_lead = Profile.objects.filter(
interested_locations=org.location,
usertype__slug='lead').values_list('user__email', flat=True)
email_body = loader.get_template(
'email_messages/organisation/member_addition_to_lead.html').render(
email_context)
text_body = loader.get_template(
'email_messages/organisation/member_addition_to_lead.txt').render(
email_context)
# send mail to the regional leads
send_email_to_list(subject,
body=email_body,
users_list=regional_lead,
text_body=text_body)
return HttpResponseRedirect(self.success_url)
else:
return render(request, self.template_name, {'form': form})
def activate_view(request, user_id, token):
"""
View function that activates the given User by setting `is_active` to
true if the provided information is verified.
"""
try:
user = User.objects.get(id=user_id, is_active=False)
except(User.DoesNotExist):
raise Http404("Your URL may have expired.")
if not PasswordResetTokenGenerator().check_token(user, token):
raise Http404("Your URL may have expired.")
form = UserRegistrationForm(data=request.POST or None, instance=user)
if form.is_valid():
user.is_active = True
user.username = form.cleaned_data['username']
user.first_name = form.cleaned_data['first_name']
user.last_name = form.cleaned_data['last_name']
user.set_password(form.cleaned_data['password'])
user.save()
return redirect(reverse_lazy('organisations:organisation_list'))
else:
return render(request, 'organisation/register_form.html',
{'form': form})
class OrganisationDeactive(views.CsrfExemptMixin,
views.LoginRequiredMixin,
views.JSONResponseMixin,
generic.UpdateView):
model = Organisation
fields = ('active', 'id')
def get_object(self, queryset=None):
return Organisation.objects.get(user=self.request.user,
id=self.kwargs['pk'])
def post(self, request, *args, **kwargs):
self.object = self.get_object()
response = self.object.toggle_active(request.user, **kwargs)
return self.render_json_response(response)
| 41.084058
| 99
| 0.592352
|
7d86597527c9b22c6f8c387610263d3ad329cbea
| 896
|
py
|
Python
|
tests/integration/spm/test_info.py
|
exe01/salt
|
0e4e8a458afc120a149eab83e5b9389c474fedf7
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/spm/test_info.py
|
exe01/salt
|
0e4e8a458afc120a149eab83e5b9389c474fedf7
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/spm/test_info.py
|
exe01/salt
|
0e4e8a458afc120a149eab83e5b9389c474fedf7
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests for the spm info utility
"""
import shutil
import pytest
from tests.support.case import SPMCase
from tests.support.helpers import destructiveTest, slowTest
@destructiveTest
@pytest.mark.windows_whitelisted
class SPMInfoTest(SPMCase):
"""
Validate the spm info command
"""
def setUp(self):
self.config = self._spm_config()
self._spm_build_files(self.config)
@slowTest
def test_spm_info(self):
"""
test spm build
"""
self._spm_create_update_repo(self.config)
install = self.run_spm("install", self.config, "apache")
get_info = self.run_spm("info", self.config, "apache")
check_info = ["Supported OSes", "Supported OS", "installing Apache"]
for info in check_info:
self.assertIn(info, "".join(get_info))
def tearDown(self):
shutil.rmtree(self._tmp_spm)
| 23.578947
| 76
| 0.654018
|
f1c1bcb28a078a8956e55586400faae757408a5b
| 95
|
py
|
Python
|
wagtail/admin/signals.py
|
evildmp/wagtail
|
d30856d1b9a8071a2fd9341a3dd9dc20c738e23b
|
[
"BSD-3-Clause"
] | 2
|
2021-03-18T21:41:05.000Z
|
2021-03-18T21:41:08.000Z
|
wagtail/admin/signals.py
|
evildmp/wagtail
|
d30856d1b9a8071a2fd9341a3dd9dc20c738e23b
|
[
"BSD-3-Clause"
] | 13
|
2015-05-08T12:27:10.000Z
|
2020-01-23T14:45:57.000Z
|
wagtail/admin/signals.py
|
evildmp/wagtail
|
d30856d1b9a8071a2fd9341a3dd9dc20c738e23b
|
[
"BSD-3-Clause"
] | 2
|
2020-09-03T20:12:32.000Z
|
2021-03-29T08:29:23.000Z
|
from django.dispatch import Signal
init_new_page = Signal(providing_args=['page', 'parent'])
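# Usage sketch (assumption: `my_handler` is a user-defined receiver; judging by
# the name and arguments, the signal is emitted when a new page's create form
# is initialised in the admin):
#
#   from wagtail.admin.signals import init_new_page
#
#   def my_handler(sender, page, parent, **kwargs):
#       ...
#
#   init_new_page.connect(my_handler)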
| 19
| 57
| 0.768421
|
988480f5f79ec538371da991fa8f860487e1de8f
| 296
|
py
|
Python
|
operators/buildv2-operator/python/pulumi_pulumi_kubernetes_crds_operators_buildv2_operator/__init__.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | null | null | null |
operators/buildv2-operator/python/pulumi_pulumi_kubernetes_crds_operators_buildv2_operator/__init__.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | 2
|
2020-09-18T17:12:23.000Z
|
2020-12-30T19:40:56.000Z
|
operators/buildv2-operator/python/pulumi_pulumi_kubernetes_crds_operators_buildv2_operator/__init__.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .provider import *
# Make subpackages available:
from . import (
build,
meta_v1,
)
| 22.769231
| 80
| 0.682432
|
545b86de6d86768e9f6ee68ccda3c4dd014faed4
| 3,110
|
py
|
Python
|
script.py
|
ameyjadiye/CoWin-Vaccine-Notifier
|
8dbe13534729a2f5a9ad3ae9ff9eff550426213c
|
[
"MIT"
] | null | null | null |
script.py
|
ameyjadiye/CoWin-Vaccine-Notifier
|
8dbe13534729a2f5a9ad3ae9ff9eff550426213c
|
[
"MIT"
] | null | null | null |
script.py
|
ameyjadiye/CoWin-Vaccine-Notifier
|
8dbe13534729a2f5a9ad3ae9ff9eff550426213c
|
[
"MIT"
] | null | null | null |
'''
Script: Covid Vaccine Slot Availability Notifier
By Ayushi Rawat
'''
import requests
from pygame import mixer
from datetime import datetime, timedelta
import time
import json
age = 52
pinCodes = ["462003"]
num_days = 2
print_flag = 'Y'
print("Starting search for Covid vaccine slots!")
actual = datetime.today()
list_format = [actual + timedelta(days=i) for i in range(num_days)]
actual_dates = [i.strftime("%d-%m-%Y") for i in list_format]
while True:
counter = 0
for pinCode in pinCodes:
for given_date in actual_dates:
URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode={}&date={}".format(pinCode, given_date)
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
result = requests.get( URL, headers=header )
# print('-------------------------------------------------------------')
# print(result.text)
# print('-------------------------------------------------------------')
if result.ok:
response_json = result.json()
flag = False
if response_json["centers"]:
if(print_flag.lower() =='y'):
for center in response_json["centers"]:
# print('-------------------------------------------------------------')
# print(center)
# print('-------------------------------------------------------------')
for session in center["sessions"]:
if (session["min_age_limit"] <= age and session["available_capacity"] > 0 ) :
print('Pincode: ' + pinCode)
print("Available on: {}".format(given_date))
print("\t", center["name"])
print("\t", center["block_name"])
print("\t Price: ", center["fee_type"])
print("\t Availablity : ", session["available_capacity"])
if(session["vaccine"] != ''):
print("\t Vaccine type: ", session["vaccine"])
print("\n")
counter = counter + 1
else:
pass
else:
pass
else:
print("No Response!")
if(counter == 0):
print("No Vaccination slot avaliable!")
else:
mixer.init()
mixer.music.load('sound/dingdong.wav')
mixer.music.play()
print("Search Completed!")
dt = datetime.now() + timedelta(minutes=3)
while datetime.now() < dt:
time.sleep(1)
| 36.162791
| 148
| 0.417042
|
23bf574290f7b5c1c747dbb357d4ca642a3da17b
| 655
|
py
|
Python
|
mozilla_django_oidc_db/migrations/0005_openidconnectconfig_sync_groups_glob_pattern.py
|
maykinmedia/mozilla-django-oidc-db
|
f576a45ee062370b1e07358769a841898509d37f
|
[
"MIT"
] | null | null | null |
mozilla_django_oidc_db/migrations/0005_openidconnectconfig_sync_groups_glob_pattern.py
|
maykinmedia/mozilla-django-oidc-db
|
f576a45ee062370b1e07358769a841898509d37f
|
[
"MIT"
] | 21
|
2021-07-05T15:18:37.000Z
|
2022-03-30T08:02:17.000Z
|
mozilla_django_oidc_db/migrations/0005_openidconnectconfig_sync_groups_glob_pattern.py
|
maykinmedia/mozilla-django-oidc-db
|
f576a45ee062370b1e07358769a841898509d37f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.24 on 2021-11-23 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("mozilla_django_oidc_db", "0004_auto_20210812_1044"),
]
operations = [
migrations.AddField(
model_name="openidconnectconfig",
name="sync_groups_glob_pattern",
field=models.CharField(
default="*",
help_text="The glob pattern that groups must match to be synchronized to the local database.",
max_length=255,
verbose_name="groups glob pattern",
),
),
]
| 27.291667
| 110
| 0.593893
|
309fb5a8d60bb24cee88aea34585959319d87670
| 3,789
|
py
|
Python
|
django_cassandra_engine/utils.py
|
maria-grigorieva/django_cassandra_engine
|
70918eeb6edd26c50a394a1ddcf6521b92ec429a
|
[
"BSD-2-Clause"
] | null | null | null |
django_cassandra_engine/utils.py
|
maria-grigorieva/django_cassandra_engine
|
70918eeb6edd26c50a394a1ddcf6521b92ec429a
|
[
"BSD-2-Clause"
] | null | null | null |
django_cassandra_engine/utils.py
|
maria-grigorieva/django_cassandra_engine
|
70918eeb6edd26c50a394a1ddcf6521b92ec429a
|
[
"BSD-2-Clause"
] | null | null | null |
import inspect
import cqlengine
import django
from django.conf import settings
class CursorWrapper(object):
"""
Simple CursorWrapper implementation based on django.db.utils.CursorWrapper
"""
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall',
'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def callproc(self, procname, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
def get_installed_apps():
"""
Return list of all installed apps
"""
if django.VERSION >= (1, 7):
from django.apps import apps
return apps.get_apps()
else:
from django.db import models
return models.get_apps()
def get_cql_models(app, keyspace=None):
"""
:param app: django models module
:param keyspace: database name (keyspace)
:return: list of all cqlengine.Model within app that should be synced to
keyspace.
"""
from cqlengine.models import DEFAULT_KEYSPACE
keyspace = keyspace or DEFAULT_KEYSPACE
models = []
for name, obj in inspect.getmembers(app):
if inspect.isclass(obj) and issubclass(obj, cqlengine.Model) \
and not obj.__abstract__:
if (obj.__keyspace__ is None and keyspace == DEFAULT_KEYSPACE) \
or obj.__keyspace__ == keyspace:
models.append(obj)
return models
def get_cassandra_connections():
"""
:return: List of tuples (db_alias, connection) for all cassandra
connections in DATABASES dict.
"""
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
yield alias, connections[alias]
def get_cassandra_connection(alias=None, name=None):
"""
:return: cassandra connection matching alias or name or just first found.
"""
for _alias, connection in get_cassandra_connections():
if alias is not None:
if alias == _alias:
return connection
elif name is not None:
if name == connection.settings_dict['NAME']:
return connection
else:
return connection
def get_cassandra_db_alias():
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
return alias
def get_engine_from_db_alias(db_alias):
"""
:param db_alias: database alias
:return: database engine from DATABASES dict corresponding to db_alias
or None if db_alias was not found
"""
return settings.DATABASES.get(db_alias, {}).get('ENGINE', None)
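# Illustrative DATABASES entry for the helpers above (a sketch; the alias
# 'cassandra' and keyspace name 'db' are assumptions). Only the ENGINE and
# NAME keys are inspected by these functions:
#
#   DATABASES = {
#       'cassandra': {
#           'ENGINE': 'django_cassandra_engine',
#           'NAME': 'db',
#       },
#   }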
| 28.488722
| 78
| 0.631037
|
4ede103a03f44976f9c476143af5a44159fe2b2b
| 18,930
|
py
|
Python
|
pdd25.py
|
multimodallearning/pdd2.5
|
6ca1aca06954528ed32a38dbc849828c4f598b7f
|
[
"Apache-2.0"
] | 10
|
2020-08-13T09:22:21.000Z
|
2022-01-25T15:08:08.000Z
|
pdd25.py
|
multimodallearning/pdd2.5
|
6ca1aca06954528ed32a38dbc849828c4f598b7f
|
[
"Apache-2.0"
] | 1
|
2020-10-27T13:19:48.000Z
|
2021-03-28T09:29:14.000Z
|
pdd25.py
|
multimodallearning/pdd2.5
|
6ca1aca06954528ed32a38dbc849828c4f598b7f
|
[
"Apache-2.0"
] | 1
|
2021-08-04T15:11:15.000Z
|
2021-08-04T15:11:15.000Z
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
print(torch.__version__[:3])
if(float(torch.__version__[:3])<1.4):
print("requires pytorch 1.4 or higher")
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import pydicom
import time
import os
import sys
#import MIND implementation from voxelmorph pull request https://github.com/voxelmorph/voxelmorph/pull/145
sys.path.append('voxelmorph/pytorch/')
import losses
print(losses.mind_loss)
#set empty arrays for images and mind features
H = 192; W = 160; D = 256;
imgs = torch.zeros(20,H,W,D)
mindssc = torch.zeros(20,12,H,W,D)
#load affinely pre-aligned "Beyond the Cranial Vault" training scans 1-10, 21-30 (31-40 are reserved for testing)
list_train = torch.cat((torch.arange(10),torch.arange(20,30)),0)+1
for i in range(20):
img_fixed = torch.from_numpy(nib.load('/data/user/AbdomenPreAffine/Training/img/img00'+str(int(list_train[i])).zfill(2)+'.nii.gz').get_data()).float()
imgs[i] = (img_fixed+1000)/500
with torch.no_grad():
mindssc[i] = losses.MINDSSC(imgs[i:i+1].unsqueeze(1).cuda(),3,3).cpu()
def dice_coeff(outputs, labels, max_label):
dice = torch.FloatTensor(max_label-1).fill_(0)
for label_num in range(1, max_label):
iflat = (outputs==label_num).view(-1).float()
tflat = (labels==label_num).view(-1).float()
intersection = torch.mean(iflat * tflat)
dice[label_num-1] = (2. * intersection) / (1e-8 + torch.mean(iflat) + torch.mean(tflat))
return dice
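# For reference: the per-label score above is the Dice coefficient
# Dice(A, B) = 2 * |A ∩ B| / (|A| + |B|); using torch.mean instead of sums
# leaves the ratio unchanged (the 1/N factors cancel), and the 1e-8 term only
# guards against division by zero for empty labels.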
avg5 = nn.AvgPool3d((5,5,5),stride=(1,1,1),padding=(2,2,2)).cuda()
o_m = H//3#H//3
o_n = W//3#W//3
o_o = D//3#D//3
corner = False
#strided grid for Obelisk features
print('numel_o',o_m*o_n*o_o)
ogrid_xyz = F.affine_grid(torch.eye(3,4).unsqueeze(0),(1,1,o_m,o_n,o_o),align_corners=corner).view(1,1,-1,1,3).cuda()
def init_weights(m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv3d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
nn.init.xavier_normal(m.weight)
if m.bias is not None:
nn.init.constant(m.bias, 0.0)
def countParameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
#unchanged from public referenced code https://github.com/multimodallearning/pdd_net
class OBELISK(nn.Module):
def __init__(self):
super(OBELISK, self).__init__()
channels = 24#16
self.offsets = nn.Parameter(torch.randn(2,channels*2,3)*0.05)
self.layer0 = nn.Conv3d(1, 4, 5, stride=2, bias=False, padding=2)
self.batch0 = nn.BatchNorm3d(4)
self.layer1 = nn.Conv3d(channels*8, channels*4, 1, bias=False, groups=1)
self.batch1 = nn.BatchNorm3d(channels*4)
self.layer2 = nn.Conv3d(channels*4, channels*4, 3, bias=False, padding=1)
self.batch2 = nn.BatchNorm3d(channels*4)
self.layer3 = nn.Conv3d(channels*4, channels*1, 1)
def forward(self, input_img):
img_in = F.avg_pool3d(input_img,3,padding=1,stride=2)
img_in = F.relu(self.batch0(self.layer0(img_in)))
sampled = F.grid_sample(img_in,ogrid_xyz + self.offsets[0,:,:].view(1,-1,1,1,3),align_corners=corner).view(1,-1,o_m,o_n,o_o)
sampled -= F.grid_sample(img_in,ogrid_xyz + self.offsets[1,:,:].view(1,-1,1,1,3),align_corners=corner).view(1,-1,o_m,o_n,o_o)
x = F.relu(self.batch1(self.layer1(sampled)))
x = F.relu(self.batch2(self.layer2(x)))
features = self.layer3(x)
return features
disp_range = 0.4 # range of displacements (pytorch -1..+1)
displacement_width = 15 #number of steps per dimension
shift_xyz = F.affine_grid(disp_range*torch.eye(3,4).unsqueeze(0),(1,1,displacement_width,displacement_width,displacement_width),align_corners=corner).view(1,1,-1,1,3).cuda()
#_,_,H,W,D = img00.size()
grid_size = 29 #number of control points per dimension
grid_xyz = F.affine_grid(torch.eye(3,4).unsqueeze(0),(1,1,grid_size,grid_size,grid_size),align_corners=corner).view(1,-1,1,1,3).cuda()
net = OBELISK()
print(countParameters(net))
#decomposed displacement label space
shift_x = shift_xyz.view(displacement_width,displacement_width,displacement_width,3)[(displacement_width-1)//2,:,:,:].reshape(1,1,-1,1,3)
shift_y = shift_xyz.view(displacement_width,displacement_width,displacement_width,3)[:,(displacement_width-1)//2,:,:].reshape(1,1,-1,1,3)
shift_z = shift_xyz.view(displacement_width,displacement_width,displacement_width,3)[:,:,(displacement_width-1)//2,:].reshape(1,1,-1,1,3)
shift_2d = torch.cat((shift_x,shift_y,shift_z),3)
class subplanar_pdd(nn.Module):
def __init__(self):
super(subplanar_pdd, self).__init__()
self.alpha = nn.Parameter(torch.Tensor([1,.1,1,1,.1,5]))#1]))#.cuda()
self.pad1 = nn.ReplicationPad3d((0,0,2,2,2,2))#.cuda()
self.avg1 = nn.AvgPool3d((3,3,1),stride=1)#.cuda()
self.max1 = nn.MaxPool3d((3,3,1),stride=1)#.cuda()
self.pad2 = nn.ReplicationPad3d((0,0,2,2,2,2))#.cuda()##
def forward(self, feat00,feat50,shift_2d_min):
#pdd correlation layer with 2.5D decomposition (slightly unrolled)
pdd_cost = torch.zeros(1,grid_size**3,displacement_width,displacement_width,3).cuda()
xyz8 = grid_size**2
for i in range(grid_size):
moving_unfold = F.grid_sample(feat50,grid_xyz[:,i*xyz8:(i+1)*xyz8,:,:,:] + shift_2d_min[:,i*xyz8:(i+1)*xyz8,:,:,:],padding_mode='border',align_corners=corner)
fixed_grid = F.grid_sample(feat00,grid_xyz[:,i*xyz8:(i+1)*xyz8,:,:,:],align_corners=corner)
pdd_cost[:,i*xyz8:(i+1)*xyz8,:,:,:] = self.alpha[1]+self.alpha[0]*torch.sum(torch.pow(fixed_grid-moving_unfold,2),1).view(1,-1,displacement_width,displacement_width,3)
pdd_cost = pdd_cost.view(1,-1,displacement_width,displacement_width,3)
# approximate min convolution / displacement compatibility
cost = (self.avg1(-self.max1(-self.pad1(pdd_cost))))
# grid-based mean field inference (one iteration)
cost_permute = cost.permute(2,3,4,0,1).view(1,3*displacement_width**2,grid_size,grid_size,grid_size)
cost_avg = self.avg1(self.avg1(self.pad2(cost_permute))).permute(0,2,3,4,1).view(1,-1,displacement_width,displacement_width,3)
# second path
cost = self.alpha[4]+self.alpha[2]*pdd_cost+self.alpha[3]*cost_avg
cost = (self.avg1(-self.max1(-self.pad1(cost))))
# grid-based mean field inference (one iteration)
cost_permute = cost.permute(2,3,4,0,1).view(1,3*displacement_width**2,grid_size,grid_size,grid_size)
cost_avg = self.avg1(self.avg1(self.pad2(cost_permute))).permute(0,2,3,4,1).view(grid_size**3,displacement_width**2,3)
#probabilistic and continuous output
cost_soft = F.softmax(-self.alpha[5]*cost_avg,1).view(-1,1,displacement_width,displacement_width,3)
pred_xyz = 0.5*(cost_soft.view(-1,displacement_width**2,3,1)*shift_2d.view(1,displacement_width**2,3,3)).sum(1).sum(1)
return cost_soft,pred_xyz,cost_avg
# GridNet and fit_sub2dense are used for instance optimisation (fitting of 2.5D displacement costs)
class GridNet(nn.Module):
def __init__(self,grid_x,grid_y,grid_z):
super(GridNet, self).__init__()
self.params = nn.Parameter(0.1*torch.randn(1,3,grid_x,grid_y,grid_z))
def forward(self):
return self.params
smooth_hw2 = 3
H2 = H//3; W2 = W//3; D2 = D//3###
def fit_sub2dense(pred_xyz,grid_xyz,cost_avg,alpha,H,W,D,lambda_w=1.5,max_iter=100):
cost2d = F.softmax(-alpha[5]*cost_avg,1).view(-1,1,displacement_width,displacement_width,3)
with torch.enable_grad():
net = GridNet(H2,W2,D2)
net.params.data = pred_xyz.permute(0,4,1,2,3).detach()+torch.randn_like(pred_xyz.permute(0,4,1,2,3))*0.05
net.cuda()
avg5 = nn.AvgPool3d((3,3,3),stride=(1,1,1),padding=(1,1,1)).cuda()
optimizer = optim.Adam(net.parameters(), lr=0.02)
lambda_weight = lambda_w#1.5#5
for iter in range(max_iter):
optimizer.zero_grad()
#second-order B-spline transformation model
fitted_grid = (avg5(avg5(net())))
#resampling transformation network to chosen control point spacing
sampled_net = F.grid_sample(fitted_grid,grid_xyz,align_corners=True).permute(2,0,3,4,1)/disp_range
#sampling the 2.5D displacement probabilities at 3D vectors
sampled_cost = 0.33*F.grid_sample(cost2d[:,:,:,:,0],sampled_net[:,:,:,0,:2],align_corners=True)
sampled_cost += 0.33*F.grid_sample(cost2d[:,:,:,:,1],sampled_net[:,:,:,0,torch.Tensor([0,2]).long()],align_corners=True)
sampled_cost += 0.33*F.grid_sample(cost2d[:,:,:,:,2],sampled_net[:,:,:,0,1:],align_corners=True)
#maximise probabilities
loss = (-sampled_cost).mean()
#minimise diffusion regularisation penalty
reg_loss = lambda_weight*((fitted_grid[0,:,:,1:,:]-fitted_grid[0,:,:,:-1,:])**2).mean()+ lambda_weight*((fitted_grid[0,:,1:,:,:]-fitted_grid[0,:,:-1,:,:])**2).mean()+ lambda_weight*((fitted_grid[0,:,:,:,1:]-fitted_grid[0,:,:,:,:-1])**2).mean()
(reg_loss+loss).backward()
optimizer.step()
#return both low-resolution and high-resolution transformation
dense_flow_fit = F.interpolate(fitted_grid.detach(),size=(H,W,D),mode='trilinear',align_corners=True)
return dense_flow_fit,fitted_grid
#data augmentation
def augmentAffine(img_in, mind_in, strength=0.05):
"""
3D affine augmentation on image and MIND-feature mini-batches on GPU.
(affine transf. is centered: trilinear interpolation and zero-padding used for sampling)
:input: img_in batch (torch.cuda.FloatTensor), mind_in batch (torch.cuda.FloatTensor)
:return: augmented BxCxDxHxW image batch (torch.cuda.FloatTensor), augmented MIND-feature batch (torch.cuda.FloatTensor)
"""
B,C,D,H,W = img_in.size()
affine_matrix = (torch.eye(3,4).unsqueeze(0) + torch.randn(B, 3, 4) * strength).to(img_in.device)
meshgrid = F.affine_grid(affine_matrix,torch.Size((B,1,D,H,W)),align_corners=corner)
img_out = F.grid_sample(img_in, meshgrid,padding_mode='border',align_corners=corner)
mind_out = F.grid_sample(mind_in, meshgrid,padding_mode='border',align_corners=corner)
return img_out, mind_out
# compute jacobian determinant as measure of deformation complexity
def jacobian_determinant_3d(dense_flow):
B,_,H,W,D = dense_flow.size()
dense_pix = dense_flow*(torch.Tensor([H-1,W-1,D-1])/2).view(1,3,1,1,1).to(dense_flow.device)
gradz = nn.Conv3d(3,3,(3,1,1),padding=(1,0,0),bias=False,groups=3)
gradz.weight.data[:,0,:,0,0] = torch.tensor([-0.5,0,0.5]).view(1,3).repeat(3,1)
gradz.to(dense_flow.device)
grady = nn.Conv3d(3,3,(1,3,1),padding=(0,1,0),bias=False,groups=3)
grady.weight.data[:,0,0,:,0] = torch.tensor([-0.5,0,0.5]).view(1,3).repeat(3,1)
grady.to(dense_flow.device)
gradx = nn.Conv3d(3,3,(1,1,3),padding=(0,0,1),bias=False,groups=3)
gradx.weight.data[:,0,0,0,:] = torch.tensor([-0.5,0,0.5]).view(1,3).repeat(3,1)
gradx.to(dense_flow.device)
with torch.no_grad():
jacobian = torch.cat((gradz(dense_pix),grady(dense_pix),gradx(dense_pix)),0)+torch.eye(3,3).view(3,3,1,1,1).to(dense_flow.device)
jacobian = jacobian[:,:,2:-2,2:-2,2:-2]
        jac_det = jacobian[0,0,:,:,:]*(jacobian[1,1,:,:,:]*jacobian[2,2,:,:,:]-jacobian[1,2,:,:,:]*jacobian[2,1,:,:,:])-\
                  jacobian[1,0,:,:,:]*(jacobian[0,1,:,:,:]*jacobian[2,2,:,:,:]-jacobian[0,2,:,:,:]*jacobian[2,1,:,:,:])+\
                  jacobian[2,0,:,:,:]*(jacobian[0,1,:,:,:]*jacobian[1,2,:,:,:]-jacobian[0,2,:,:,:]*jacobian[1,1,:,:,:])
return jac_det
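# Hedged interpretation sketch (not part of the original training loop): the standard
# deviation of the Jacobian determinant summarises deformation complexity, and the fraction
# of negative determinants counts folding voxels; this mirrors the metrics printed during
# validation below. `some_dense_flow` is a hypothetical 1x3xHxWxD displacement field.
def _deformation_complexity(some_dense_flow):
    jac = jacobian_determinant_3d(some_dense_flow)
    return torch.std(jac.view(-1)).item(), torch.mean((jac.view(-1) < 0).float()).item()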
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_cached()
torch.cuda.reset_max_memory_allocated()
#initialise trainable network parts
reg2d = subplanar_pdd()
reg2d.cuda()
net = OBELISK()
net.apply(init_weights)
net.cuda()
net.train()
#set-up 2D offsets for multi-step 2.5D estimation
shift_2d_min = shift_2d.repeat(1,grid_size**3,1,1,1)
shift_2d_min.requires_grad = False
#train using Adam with weight decay and exponential LR decay
optimizer = optim.AdamW(list(net.parameters())+list(reg2d.parameters()),lr=0.005)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer,gamma=0.99)
#some running metrics
run_mind = np.zeros(1001)
run_diff = np.zeros(1001)
run_dice = torch.zeros(0,13)
run_jac = torch.zeros(0,2)
idx_train = torch.cat((torch.arange(0,10),torch.arange(10,20)),0)
torch.cuda.synchronize()
t0 = time.time()
init_memory0 = torch.cuda.max_memory_allocated()
init_memory1 = torch.cuda.max_memory_cached()
sub_fit = torch.zeros(3,grid_size**3).cuda()
torch.cuda.synchronize()
t1 = time.time()
print('time','%0.3f'%(t1-t0),'sec. init alloc','%0.3f'%(init_memory0*1e-9),'GByte. init cached','%0.3f'%(init_memory1*1e-9))
#run for 1000 iterations / 250 epochs
for i in range(1001):
#select random training pair (mini-batch=4 averaging at the end)
idx = idx_train[torch.randperm(20)][:2]
#fixed scan and MIND features are augmented
    img00, mind_aug = augmentAffine(imgs[idx[0:1]].unsqueeze(1).cuda(),mindssc[idx[0:1],:,:,:].cuda(),0.0375)
img50 = imgs[idx[1:2]].unsqueeze(1).cuda()
#extract obelisk features with channels=24 and stride=3
feat00 = net(img00) #00 is fixed
feat50 = net(img50) #50 is moving
    #find initial through-plane offsets (without gradient tracking)
with torch.no_grad():
#run forward path with previous weights
cost_soft2d,pred2d,cost_avg = reg2d(feat00.detach(),feat50.detach(),shift_2d.repeat(1,grid_size**3,1,1,1))
pred2d = pred2d.view(1,grid_size,grid_size,grid_size,3)
#perform instance fit
dense_sub,sub_fit = fit_sub2dense(pred2d.detach(),grid_xyz.detach(),cost_avg.detach(),reg2d.alpha.detach(),H,W,D,5,30)
    #slightly augment the found through-plane offsets
sub_fit2 = sub_fit.view(3,-1) + 0.05*torch.randn(3,grid_size**3).cuda()
shift_2d_min[0,:,:,0,2] = sub_fit2.view(3,-1)[2,:].view(-1,1).repeat(1,displacement_width**2)
shift_2d_min[0,:,:,1,1] = sub_fit2.view(3,-1)[1,:].view(-1,1).repeat(1,displacement_width**2)
shift_2d_min[0,:,:,2,0] = sub_fit2.view(3,-1)[0,:].view(-1,1).repeat(1,displacement_width**2)
shift_2d_min.requires_grad = False
#run 2.5D probabilistic dense displacement (pdd2.5-net)
cost_soft2d,pred2d,cost_avg = reg2d(feat00,feat50,shift_2d_min)
#warm-up phase with stronger regularisation
if(i<100):
lambda_weight_2d = float(torch.linspace(0.75,0.025,100)[i])
else:
lambda_weight_2d = 0.025
pred2d = pred2d.view(1,grid_size,grid_size,grid_size,3)
#diffusion regularisation loss
diffloss = lambda_weight_2d*((pred2d[0,:,1:,:,:]-pred2d[0,:,:-1,:,:])**2).mean()+\
lambda_weight_2d*((pred2d[0,1:,:,:,:]-pred2d[0,:-1,:,:,:])**2).mean()+\
lambda_weight_2d*((pred2d[0,:,:,1:,:]-pred2d[0,:,:,:-1,:])**2).mean()
#nonlocal MIND loss
fixed_mind = F.grid_sample(mind_aug.cuda(),grid_xyz,padding_mode='border',align_corners=corner).detach()#.long().squeeze(1)
moving_unfold = F.grid_sample(mindssc[idx[1:2],:,:,:].cuda(),grid_xyz + shift_2d_min,padding_mode='border',align_corners=corner)
nonlocal_mind = 1/3*torch.sum(moving_unfold*cost_soft2d.view(1,1,-1,displacement_width**2,3),[3,4]).view(1,12,grid_size**3,1,1)
mindloss2d = ((nonlocal_mind-fixed_mind)**2)#*class_weight.view(1,-1,1,1,1)
mindloss = mindloss2d.mean()
run_diff[i] = diffloss.item()
run_mind[i] = mindloss.item()
(diffloss+mindloss).backward()
#implicit mini-batch of 4 (and LR-decay)
if(i%4==0):
optimizer.step()
optimizer.zero_grad()
scheduler.step()
#verbose ON: report some numbers and run inference on (potentially unseen test images)
if(i%5==0):
print(i,time.time()-t1,'mind',mindloss.item(),'diff',diffloss.item())
print(reg2d.alpha)
with torch.no_grad():
feat00 = net(imgs[0:1].unsqueeze(1).cuda())#net(img00)# #00 is fixed
feat50 = net(imgs[1:2].unsqueeze(1).cuda())#net(img50)# #50 is moving
cost_soft2d,pred2d,cost_avg = reg2d(feat00,feat50,shift_2d.repeat(1,grid_size**3,1,1,1))
pred2d = pred2d.view(1,grid_size,grid_size,grid_size,3)
#instance based optimisation / fitting of 2.5D displacement cost
dense_sub,sub_fit = fit_sub2dense(pred2d.detach(),grid_xyz.detach(),cost_avg.detach(),reg2d.alpha.detach(),H,W,D,5,30)
identity = F.affine_grid(torch.eye(3,4).unsqueeze(0).cuda(),(1,1,H,W,D),align_corners=True)
#second refinement step (see Figure 1 right in paper)
shift_2d_min[0,:,:,0,2] = sub_fit.view(3,-1)[2,:].view(-1,1).repeat(1,displacement_width**2)
shift_2d_min[0,:,:,1,1] = sub_fit.view(3,-1)[1,:].view(-1,1).repeat(1,displacement_width**2)
shift_2d_min[0,:,:,2,0] = sub_fit.view(3,-1)[0,:].view(-1,1).repeat(1,displacement_width**2)
shift_2d_min.requires_grad = False
#new dissimilarity planes are computed based on previous fit to reduce approximation error
cost_soft2d,pred2d,cost_avg = reg2d(feat00.detach(),feat50.detach(),shift_2d_min)
pred2d = pred2d.view(1,grid_size,grid_size,grid_size,3)
#instance based optimisation / fitting of 2.5D displacement cost is repeated
dense_sub,sub_fit = fit_sub2dense(pred2d.detach(),grid_xyz.detach(),cost_avg.detach(),reg2d.alpha.detach(),H,W,D,10,30)
#if segmentations are available for some validation/training data, Dice can be computed
#seg_w2d = F.grid_sample(segs[1:2,:,:,:].float().unsqueeze(1).cuda(),identity+dense_sub.permute(0,2,3,4,1),mode='nearest',padding_mode='border',align_corners=True).detach().long().squeeze()
#d2d = dice_coeff(segs[0:1,:,:,:].cuda(),seg_w2d.cuda(),14).cpu()
#print(d2d,d2d.mean())
#run_dice = torch.cat((run_dice,d2d.view(1,-1)),0)
#complexity of transformation and foldings
jacdet = jacobian_determinant_3d(dense_sub)
jac2 = torch.Tensor([torch.std(jacdet.view(-1)),torch.mean((jacdet.view(-1)<0).float())]).cpu()
print(torch.std(jacdet.view(-1)),torch.mean((jacdet.view(-1)<0).float()))
run_jac = torch.cat((run_jac,jac2.view(1,-1)),0)
init_memory0 = torch.cuda.max_memory_allocated()
init_memory1 = torch.cuda.max_memory_cached()
torch.cuda.synchronize()
t2 = time.time()
print('time','%0.3f'%(t2-t1),'sec. back alloc','%0.3f'%(init_memory0*1e-9),' GByte. back cached','%0.3f'%(init_memory1*1e-9))
| 47.80303 | 339 | 0.668463 |
d79e094581554efba97207acb4c008c63492c9b3 | 302 | py | Python
tests/diff_test.py | Key-Differentiators/cs207-FinalProject | d2255a63a4b18a2653812f4c0fa7aa4fe20a18d1 | ["MIT"] | null | null | null
tests/diff_test.py | Key-Differentiators/cs207-FinalProject | d2255a63a4b18a2653812f4c0fa7aa4fe20a18d1 | ["MIT"] | 14 | 2019-10-30T18:37:13.000Z | 2019-12-10T02:04:33.000Z
tests/diff_test.py | Key-Differentiators/cs207-FinalProject | d2255a63a4b18a2653812f4c0fa7aa4fe20a18d1 | ["MIT"] | null | null | null
# diff_test.py
import pytest
import mock
from keydifferentiator import __main__ as module
def test_diff():
f = '3 * x + 3'
x = 1
val, der = module.diff(f, x)
assert(val == 6)
assert(der == 3)
def test_init():
with mock.patch.object(module, "input", return_value='q'):
module.main()
| 17.764706 | 62 | 0.652318 |
a77071df9b3a648eaf60c4fdb1132890625e5278 | 410,589 | py | Python
Lib/pydoc_data/topics.py | rbuzatu90/hyperv-python | 82bf5a72b4d956ea05affe1644b47e378dec0f4e | ["bzip2-1.0.6"] | 195 | 2016-01-14T16:03:02.000Z | 2021-12-29T09:15:02.000Z
Lib/pydoc_data/topics.py | rbuzatu90/hyperv-python | 82bf5a72b4d956ea05affe1644b47e378dec0f4e | ["bzip2-1.0.6"] | 75 | 2016-01-14T16:03:02.000Z | 2020-04-29T22:51:53.000Z
Lib/pydoc_data/topics.py | rbuzatu90/hyperv-python | 82bf5a72b4d956ea05affe1644b47e378dec0f4e | ["bzip2-1.0.6"] | 24 | 2016-02-29T11:45:47.000Z | 2021-12-24T08:41:37.000Z
|
# -*- coding: utf-8 -*-
# Autogenerated by Sphinx on Tue Nov 25 18:24:45 2014
topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n',
'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. 
Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, "IndexError" is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to (small) integers.\n If either bound is negative, the sequence\'s length is added to it.\n The resulting bounds are clipped to lie between zero and the\n sequence\'s length, inclusive. Finally, the sequence object is asked\n to replace the slice with the items of the assigned sequence. The\n length of the slice may be different from the length of the assigned\n sequence, thus changing the length of the target sequence, if the\n object allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints "[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n',
'atom-literals': u"\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section *Literals* for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n",
'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should not simply execute "self.name = value" --- this would cause\n a recursive call to itself. Instead, it should insert the value in\n the dictionary of instance attributes, e.g., "self.__dict__[name] =\n value". For new-style classes, rather than accessing the instance\n dictionary, it should call the base class method with the same\n name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). 
In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to a new-style object instance, "a.x" is transformed\n into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a new-style class, "A.x" is transformed into the\n call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. 
In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. 
In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n',
'attribute-references': u'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, e.g., a module, list, or an instance. This\nobject is then asked to produce the attribute whose name is the\nidentifier. If this attribute is not available, the exception\n"AttributeError" is raised. Otherwise, the type and value of the\nobject produced is determined by the object. Multiple evaluations of\nthe same attribute reference may yield different objects.\n',
'augassign': u'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
'binary': u'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger (plain or long) and the other must be a sequence. In the\nformer case, the numbers are converted to a common type and then\nmultiplied together. In the latter case, sequence repetition is\nperformed; a negative repetition factor yields an empty sequence.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Plain or long integer division yields an\ninteger of the same type; the result is that of mathematical division\nwith the \'floor\' function applied to the result. Division by zero\nraises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n"ZeroDivisionError" exception. The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".) The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [2].\n\nThe integer division and modulo operators are connected by the\nfollowing identity: "x == (x/y)*y + (x%y)". Integer division and\nmodulo are also connected with the built-in function "divmod()":\n"divmod(x, y) == (x/y, x%y)". These identities don\'t hold for\nfloating point numbers; there similar identities hold approximately\nwhere "x/y" is replaced by "floor(x/y)" or "floor(x/y) - 1" [3].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string and unicode objects to perform\nstring formatting (also known as interpolation). The syntax for string\nformatting is described in the Python Library Reference, section\n*String Formatting Operations*.\n\nDeprecated since version 2.3: The floor division operator, the modulo\noperator, and the "divmod()" function are no longer defined for\ncomplex numbers. Instead, convert to a floating point number using\nthe "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n',
'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe plain or long integers. The arguments are converted to a common\ntype.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be plain or long integers. The arguments are\nconverted to a common type.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be plain or long integers. The arguments are converted to\na common type.\n',
'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "func_code" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec" statement or the built-in "eval()"\nfunction.\n\nSee *The standard type hierarchy* for more information.\n',
'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is used by extended slice notation (see *Slicings*). It\nsupports no special operations. There is exactly one ellipsis object,\nnamed "Ellipsis" (a built-in name).\n\nIt is written as "Ellipsis". When in a subscript, it can also be\nwritten as "...", for example "seq[...]".\n',
'bltin-null-object': u'\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named "None" (a built-in name).\n\nIt is written as "None".\n',
'bltin-type-objects': u'\nType Objects\n************\n\nType objects represent the various object types. An object\'s type is\naccessed by the built-in function "type()". There are no special\noperations on types. The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "<type \'int\'>".\n',
'booleans': u'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. (See the "__nonzero__()" special method for a way to change\nthis.)\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value. Because "not" has to invent a\nvalue anyway, it does not bother to return a value of the same type as\nits argument, so e.g., "not \'foo\'" yields "False", not "\'\'".)\n',
'break': u'\nThe "break" statement\n*********************\n\n break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n',
'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n',
'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","]\n | expression genexpr_for] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types). All argument expressions are evaluated before the call\nis attempted. Please refer to section *Function definitions* for the\nsyntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. 
In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print a, b\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames. Formal parameters using the syntax "(sublist)" cannot be used\nas keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. 
When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n',
'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with "self.name = value". Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n',
'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section *Special method names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. Unicode and 8-bit strings are fully interoperable in\n this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "cmp([1,2,x], [1,2,y])" returns\n the same as "cmp(x,y)". If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, "[1,2] <\n [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership. "x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise. 
"x not in s" returns the negation of "x in s". The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object. However, it make sense\nfor many other object types to support membership tests without being\na sequence. In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that "x == y[i]" is true.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*. An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so """ in "abc"" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [7]\n',
'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements. Function and class definitions are\nalso syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print" statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. 
A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. 
For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)". Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. 
Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe "with" statement\n====================\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n "with_statement" feature has been enabled. It is always enabled in\n Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." 
identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. 
The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with "self.name = value". Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n',
'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n',
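To make the protocol concrete, here is a minimal, illustrative context manager (the class name "Timer" is an assumption, not a documented API); "__enter__()" returns the object bound by "as", and "__exit__()" returns a false value so exceptions propagate:

   import time

   class Timer(object):
       def __enter__(self):
           self.start = time.time()
           return self                   # bound to the "as" target, if any
       def __exit__(self, exc_type, exc_value, traceback):
           self.elapsed = time.time() - self.start
           return False                  # false value: do not suppress exceptions

   with Timer() as t:
       sum(range(100000))
   print(t.elapsed >= 0.0)               # True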
'continue': u'\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n',
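A brief illustrative snippet (not part of the reference text) showing that a "continue" inside a "try" suite still runs the "finally" clause before the next loop cycle starts:

   total = 0
   for n in [1, -2, 3]:
       try:
           if n < 0:
               continue          # the "finally" clause below still executes
           total += n
       finally:
           print("checked %d" % n)
   print(total)                  # 4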
'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at *Coercion rules*. If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the\n other is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions can define their own\ncoercions.\n',
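The coercion ladder above can be demonstrated with a few expressions (illustrative only; the type names in the comments are the Python 2 ones):

   print(type(1 + 2))      # <type 'int'>: both operands are plain integers
   print(type(1 + 2.5))    # <type 'float'>: the int is converted to floating point
   print(type(1 + 2**70))  # <type 'long'>: the int is converted to long integer
   print(type(1 + 2j))     # <type 'complex'>: the other operand becomes complex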
'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". 
Circular references\n which are garbage are detected when the option cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the *-R* command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. 
However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if "self < other",\n zero if "self == other", a positive integer if "self > other". If\n no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n class instances are compared by object identity ("address"). See\n also the description of "__hash__()" for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by "__cmp__()" has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n If a class does not define a "__cmp__()" or "__eq__()" method it\n should not define a "__hash__()" operation either; if it defines\n "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n not be usable in hashed collections. If a class defines mutable\n objects and implements a "__cmp__()" or "__eq__()" method, it\n should not implement "__hash__()", since hashable collection\n implementations require that a object\'s hash value is immutable (if\n the object\'s hash value changes, it will be in the wrong hash\n bucket).\n\n User-defined classes have "__cmp__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns a result derived from\n "id(x)".\n\n Classes which inherit a "__hash__()" method from a parent class but\n change the meaning of "__cmp__()" or "__eq__()" such that the hash\n value returned is no longer appropriate (e.g. by switching to a\n value-based concept of equality instead of the default identity\n based equality) can explicitly flag themselves as being unhashable\n by setting "__hash__ = None" in the class definition. 
Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n',
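As an illustrative sketch tying several of these hooks together (the class name "Point" is hypothetical), a value-like class typically defines "__repr__()", "__eq__()" together with "__ne__()", and a matching "__hash__()" so that equal instances can serve as dictionary keys:

   class Point(object):
       def __init__(self, x, y):
           self.x, self.y = x, y
       def __repr__(self):
           return "Point(%r, %r)" % (self.x, self.y)
       def __eq__(self, other):
           return isinstance(other, Point) and (self.x, self.y) == (other.x, other.y)
       def __ne__(self, other):
           return not self.__eq__(other)     # keep "!=" consistent with "=="
       def __hash__(self):
           return hash((self.x, self.y))     # equal objects must hash equally

   p = Point(1, 2)
   print(p)                          # Point(1, 2): __str__ falls back to __repr__
   print(p == Point(1, 2))           # True
   print({p: "origin"}[Point(1, 2)]) # origin: usable as a dictionary key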
'debugger': u'\n"pdb" --- The Python Debugger\n*****************************\n\n**Source code:** Lib/pdb.py\n\n======================================================================\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "c" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print spam\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print spam\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n Execute the *statement* (given as a string) under debugger control.\n The debugger prompt appears before any code is executed; you can\n set breakpoints and type "continue", or you can step through the\n statement using "step" or "next" (all these commands are explained\n below). The optional *globals* and *locals* arguments specify the\n environment in which the code is executed; by default the\n dictionary of the module "__main__" is used. (See the explanation\n of the "exec" statement or the "eval()" built-in function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n Evaluate the *expression* (given as a string) under debugger\n control. When "runeval()" returns, it returns the value of the\n expression. Otherwise this function is similar to "run()".\n\npdb.runcall(function[, argument, ...])\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. 
The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem([traceback])\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 2.7: The *skip* argument.\n\n run(statement[, globals[, locals]])\n runeval(expression[, globals[, locals]])\n runcall(function[, argument, ...])\n set_trace()\n\n See the documentation for the functions explained above.\n',
'del': u'\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n',
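A short illustration (not part of the reference text) of the three kinds of deletion described above:

   x = [0, 1, 2, 3, 4]
   del x[1:3]              # deleting a slicing: like assigning an empty slice
   print(x)                # [0, 3, 4]

   d = {'a': 1, 'b': 2}
   del d['a']              # deleting a subscription is passed to the dict
   print(d)                # {'b': 2}

   y = 10
   del y                   # unbinds the name; using y afterwards raises NameError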
'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n',
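Two small illustrative displays (not drawn from the reference text) showing the duplicate-key rule and a dict comprehension:

   d = {'a': 1, 'b': 2, 'a': 3}
   print(d['a'])                                 # 3: the last datum for a key prevails

   squares = {n: n * n for n in range(4)}        # dict comprehension (2.7 and later)
   print(squares == {0: 0, 1: 1, 2: 4, 3: 9})    # True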
'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n',
'else': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n',
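A minimal illustrative "if"/"elif"/"else" chain (the function name "sign" is hypothetical); exactly one suite is selected:

   def sign(n):
       if n > 0:
           return "positive"
       elif n < 0:
           return "negative"
       else:
           return "zero"

   print(sign(-5))   # negative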
'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nExceptions can also be identified by strings, in which case the\n"except" clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n',
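A compact illustrative sketch (the exception class "ConfigError" is hypothetical) of raising and handling a class-based exception, where the handler matches the class or one of its base classes:

   class ConfigError(Exception):
       pass

   def load(value):
       if value is None:
           raise ConfigError("missing value")
       return value

   try:
       load(None)
   except ConfigError as exc:       # would also match via a base class such as Exception
       print("handled: %s" % exc)   # handled: missing value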
'exec': u'\nThe "exec" statement\n********************\n\n exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n\nThis statement supports dynamic execution of Python code. The first\nexpression should evaluate to either a Unicode string, a *Latin-1*\nencoded string, an open file object, a code object, or a tuple. If it\nis a string, the string is parsed as a suite of Python statements\nwhich is then executed (unless a syntax error occurs). [1] If it is an\nopen file, the file is parsed until EOF and executed. If it is a code\nobject, it is simply executed. For the interpretation of a tuple, see\nbelow. In all cases, the code that\'s executed is expected to be valid\nas file input (see section *File input*). Be aware that the "return"\nand "yield" statements may not be used outside of function definitions\neven within the context of code passed to the "exec" statement.\n\nIn all cases, if the optional parts are omitted, the code is executed\nin the current scope. If only the first expression after "in" is\nspecified, it should be a dictionary, which will be used for both the\nglobal and the local variables. If two expressions are given, they\nare used for the global and local variables, respectively. If\nprovided, *locals* can be any mapping object. Remember that at module\nlevel, globals and locals are the same dictionary. If two separate\nobjects are given as *globals* and *locals*, the code will be executed\nas if it were embedded in a class definition.\n\nThe first expression may also be a tuple of length 2 or 3. In this\ncase, the optional parts must be omitted. The form "exec(expr,\nglobals)" is equivalent to "exec expr in globals", while the form\n"exec(expr, globals, locals)" is equivalent to "exec expr in globals,\nlocals". The tuple form of "exec" provides compatibility with Python\n3, where "exec" is a function rather than a statement.\n\nChanged in version 2.4: Formerly, *locals* was required to be a\ndictionary.\n\nAs a side effect, an implementation may insert additional keys into\nthe dictionaries given besides those corresponding to variable names\nset by the executed code. For example, the current implementation may\nadd a reference to the dictionary of the built-in module "__builtin__"\nunder the key "__builtins__" (!).\n\n**Programmer\'s hints:** dynamic evaluation of expressions is supported\nby the built-in function "eval()". The built-in functions "globals()"\nand "locals()" return the current global and local dictionary,\nrespectively, which may be useful to pass around for use by "exec".\n\n-[ Footnotes ]-\n\n[1] Note that the parser only accepts the Unix-style end of line\n convention. If you are reading the code from a file, make sure to\n use *universal newlines* mode to convert Windows or Mac-style\n newlines.\n',
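An illustrative Python 2 snippet (not part of the reference text) showing both the statement form and the tuple form, executed in an explicit namespace:

   ns = {'x': 3}
   exec "y = x * 2" in ns      # statement form: ns is used for globals and locals
   print(ns['y'])              # 6

   exec("z = x + 1", ns)       # tuple form, equivalent to: exec "z = x + 1" in ns
   print(ns['z'])              # 4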
'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function "execfile()" is a code block. The string argument\npassed to the built-in function "eval()" and to the "exec" statement\nis a code block. The expression read and evaluated by the built-in\nfunction "input()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, in the\nsecond position of an "except" clause header or after "as" in a "with"\nstatement. The "import" statement of the form "from ... import *"\nbinds all names defined in the imported module, except those beginning\nwith an underscore. This form may only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). 
It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a "SyntaxError".\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "__builtin__". The global namespace is searched first.\nIf the name is not found there, the builtins namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "__builtin__" (note: no\n\'s\'); when in any other module, "__builtins__" is an alias for the\ndictionary of the "__builtin__" module itself. "__builtins__" can be\nset to a user-created dictionary to create a weak form of restricted\nexecution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "__builtin__" (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. 
An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nExceptions can also be identified by strings, in which case the\n"except" clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n',
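For illustration (the names are hypothetical), the binding rule above means a name assigned anywhere in a function is local throughout that function, and the "global" statement is needed to rebind a module-level name:

   x = 10

   def broken():
       print(x)     # raises UnboundLocalError: x is local because it is
       x = 20       # bound later in this same block

   def fixed():
       global x     # all uses of x in this block refer to the module-level binding
       print(x)     # 10
       x = 20

   fixed()
   print(x)         # 20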
'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n',
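A few illustrative expression lists (not taken from the reference text):

   t = 1, 2, 3                       # a comma-separated expression list yields a tuple
   print(len(t))                     # 3
   singleton = 1,                    # the trailing comma creates a one-element tuple
   print(type(singleton) is tuple)   # True
   print((1) == 1)                   # True: parentheses alone do not make a tuple
   print(() == tuple())              # True: the empty tuple is an empty pair of parentheses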
'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts of floating point numbers can\nlook like octal integers, but are interpreted using radix 10. For\nexample, "077e010" is legal, and denotes the same number as "77e10".\nThe allowed range of floating point literals is implementation-\ndependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n',
'for': u'\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n',
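A small example of the "else" clause on a "for" loop, which runs only when the loop was not terminated by "break"; the variable names are for illustration only:

    >>> primes = []
    >>> for n in range(2, 10):
    ...     for x in range(2, n):
    ...         if n % x == 0:
    ...             break              # a factor was found; skip the "else" clause
    ...     else:
    ...         primes.append(n)       # inner loop exhausted without "break"
    ...
    >>> primes
    [2, 3, 5, 7]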
'formatstrings': u'\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'". These specify a non-default format for the\nreplacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself. However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting. 
By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nTwo conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, and "\'!r\'" which calls "repr()".\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in "format()" function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <any character>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. Note that it is not possible to use "{" and "}" as *fill*\nchar while using the "str.format()" method; this limitation however\ndoesn\'t affect the "format()" function.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'<\'" | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | "\'>\'" | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | "\'=\'" | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. 
|\n +-----------+------------------------------------------------------------+\n | "\'^\'" | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'+\'" | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | "\'-\'" | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe "\'#\'" option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by "\'0b\'", "\'0o\'", or "\'0x\'", respectively.\n\nThe "\',\'" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\'n\'" integer presentation type\ninstead.\n\nChanged in version 2.7: Added the "\',\'" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero ("\'0\'") character enables sign-\naware zero-padding for numeric types. This is equivalent to a *fill*\ncharacter of "\'0\'" with an *alignment* type of "\'=\'".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\'f\'" and "\'F\'", or before and after the decimal point\nfor a floating point value formatted with "\'g\'" or "\'G\'". For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'s\'" | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'s\'". |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'b\'" | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | "\'c\'" | Character. 
Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | "\'d\'" | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | "\'o\'" | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | "\'x\'" | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'X\'" | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'d\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'d\'". |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\'n\'"\nand None). When doing so, "float()" is used to convert the integer to\na floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'e\'" | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'E\'" | Exponent notation. Same as "\'e\'" except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | "\'f\'" | Fixed point. Displays the number as a fixed-point number. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'F\'" | Fixed point. Same as "\'f\'". |\n +-----------+------------------------------------------------------------+\n | "\'g\'" | General format. For a given precision "p >= 1", this |\n | | rounds the number to "p" significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type "\'e\'" and precision "p-1" |\n | | would have exponent "exp". Then if "-4 <= exp < p", the |\n | | number is formatted with presentation type "\'f\'" and |\n | | precision "p-1-exp". Otherwise, the number is formatted |\n | | with presentation type "\'e\'" and precision "p-1". In both |\n | | cases insignificant trailing zeros are removed from the |\n | | significand, and the decimal point is also removed if |\n | | there are no remaining digits following it. Positive and |\n | | negative infinity, positive and negative zero, and nans, |\n | | are formatted as "inf", "-inf", "0", "-0" and "nan" |\n | | respectively, regardless of the precision. A precision of |\n | | "0" is treated as equivalent to a precision of "1". 
The |\n | | default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'G\'" | General format. Same as "\'g\'" except switches to "\'E\'" if |\n | | the number gets too large. The representations of infinity |\n | | and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'g\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | "\'%\'" | Percentage. Multiplies the number by 100 and displays in |\n | | fixed ("\'f\'") format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'g\'". |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 2.7+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point(object):\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing "%s" and "%r":\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19.5\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n',
'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. 
If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n',
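Two short sketches of the call semantics described above, with made-up function names: excess positional and keyword arguments collected by "*" and "**" parameters, and a mutable default value that is evaluated only once:

    >>> def register(name, *positions, **attributes):
    ...     return name, positions, attributes
    ...
    >>> register('pawn', 1, 2, color='white')
    ('pawn', (1, 2), {'color': 'white'})

    >>> def append_to(item, bucket=[]):   # the default list is created once, at definition time
    ...     bucket.append(item)
    ...     return bucket
    ...
    >>> append_to(1)
    [1]
    >>> append_to(2)                      # the same list object is reused
    [1, 2]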
'global': u'\nThe "global" statement\n**********************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the "global" is a directive to the parser. It\napplies only to code parsed at the same time as the "global"\nstatement. In particular, a "global" statement contained in an "exec"\nstatement does not affect the code block *containing* the "exec"\nstatement, and code contained in an "exec" statement is unaffected by\n"global" statements in the code containing the "exec" statement. The\nsame applies to the "eval()", "execfile()" and "compile()" functions.\n',
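A minimal example of "global" allowing assignment to a module-level name from inside a function; the names are illustrative:

    >>> counter = 0
    >>> def increment():
    ...     global counter
    ...     counter = counter + 1      # without "global" this would bind a new local name
    ...
    >>> increment()
    >>> increment()
    >>> counter
    2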
'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "__builtin__" module. When\n not in interactive mode, "_" has no special meaning and is not\n defined. See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n',
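A small sketch of the name mangling applied to class-private "__*" names; the class and attribute names are made up:

    >>> class Base(object):
    ...     def __init__(self):
    ...         self.__token = 'base'      # rewritten to the mangled name _Base__token
    ...
    >>> b = Base()
    >>> b._Base__token
    'base'
    >>> hasattr(b, '__token')              # the unmangled name is not set on the instance
    False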
'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n identifier ::= (letter|"_") (letter | digit | "_")*\n letter ::= lowercase | uppercase\n lowercase ::= "a"..."z"\n uppercase ::= "A"..."Z"\n digit ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n and del from not while\n as elif global or with\n assert else if pass yield\n break except import print\n class exec in raise\n continue finally is return\n def for lambda try\n\nChanged in version 2.4: "None" became a constant and is now recognized\nby the compiler as a name for the built-in object "None". Although it\nis not a keyword, you cannot assign a different object to it.\n\nChanged in version 2.5: Using "as" and "with" as identifiers triggers\na warning. To use them as keywords, enable the "with_statement"\nfuture feature .\n\nChanged in version 2.6: "as" and "with" are full keywords.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "__builtin__" module. When\n not in interactive mode, "_" has no special meaning and is not\n defined. See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n',
'if': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n',
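A brief example showing that exactly one suite is selected; the function name is illustrative:

    >>> def sign(x):
    ...     if x > 0:
    ...         return 'positive'
    ...     elif x < 0:
    ...         return 'negative'
    ...     else:
    ...         return 'zero'
    ...
    >>> sign(-3)
    'negative'
    >>> sign(0)
    'zero'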
'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n',
'import': u'\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the "import" statement occurs). The\nstatement comes in two forms differing on whether it uses the "from"\nkeyword. The first form (without "from") repeats these steps for each\nidentifier in the list. The form with "from" performs step (1) once,\nand then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n"sys.modules", the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then "sys.meta_path" is\nsearched (the specification for "sys.meta_path" can be found in **PEP\n302**). The object is a list of *finder* objects which are queried in\norder as to whether they know how to load the module by calling their\n"find_module()" method with the name of the module. If the module\nhappens to be contained within a package (as denoted by the existence\nof a dot in the name), then a second argument to "find_module()" is\ngiven as the value of the "__path__" attribute from the parent package\n(everything up to the last dot in the name of the module being\nimported). If a finder can find the module it returns a *loader*\n(discussed later) or returns "None".\n\nIf none of the finders on "sys.meta_path" are able to find the module\nthen some implicitly defined finders are queried. Implementations of\nPython vary in what implicit meta path finders are defined. The one\nthey all do define, though, is one that handles "sys.path_hooks",\n"sys.path_importer_cache", and "sys.path".\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to "find_module()",\n"__path__" on the parent package, is used as the source of paths. If\nthe module is not contained in a package then "sys.path" is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n"sys.path_importer_cache" caches finders for paths and is checked for\na finder. 
If the path does not have a finder cached then\n"sys.path_hooks" is searched by calling each object in the list with a\nsingle argument of the path, returning a finder or raises\n"ImportError". If a finder is returned then it is cached in\n"sys.path_importer_cache" and then used for that path entry. If no\nfinder can be found but the path exists then a value of "None" is\nstored in "sys.path_importer_cache" to signify that an implicit, file-\nbased finder that handles modules stored as individual files should be\nused for that path. If the path does not exist then a finder which\nalways returns "None" is placed in the cache for the path.\n\nIf no finder can find the module then "ImportError" is raised.\nOtherwise some finder returned a loader whose "load_module()" method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin "sys.modules" (a possibility if the loader is called outside of the\nimport machinery) then it is to use that module for initialization and\nnot a new module. But if the module does not exist in "sys.modules"\nthen it is to be added to that dict before initialization begins. If\nan error occurs during loading of the module and it was added to\n"sys.modules" it is to be removed from the dict. If an error occurs\nbut the module was already in "sys.modules" it is left in the dict.\n\nThe loader must set several attributes on the module. "__name__" is to\nbe set to the name of the module. "__file__" is to be the "path" to\nthe file unless the module is built-in (and thus listed in\n"sys.builtin_module_names") in which case the attribute is not set. If\nwhat is being imported is a package then "__path__" is to be set to a\nlist of paths to be searched when looking for modules and packages\ncontained within the package being imported. "__package__" is optional\nbut should be set to the name of package that contains the module or\npackage (the empty string is used for module not contained in a\npackage). "__loader__" is also optional but should be set to the\nloader object that is loading the module.\n\nIf an error occurs during loading then the loader raises "ImportError"\nif some other exception is not already being propagated. Otherwise the\nloader returns the module that was loaded and initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of "import" statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. If the module name is followed by "as", the\nname following "as" is used as the local name for the module.\n\nThe "from" form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of "import", an alternate local name\ncan be supplied by specifying ""as" localname". If a name is not\nfound, "ImportError" is raised. If the list of identifiers is\nreplaced by a star ("\'*\'"), all public names defined in the module are\nbound in the local namespace of the "import" statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. 
The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope. If the\nwild card form of import --- "import *" --- is used in a function and\nthe function contains or is a nested block with free variables, the\ncompiler will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are "unicode_literals",\n"print_function", "absolute_import", "division", "generators",\n"nested_scopes" and "with_statement". "generators", "with_statement",\n"nested_scopes" are redundant in Python version 2.6 and above because\nthey are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. 
Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an "exec" statement or calls to the built-in\nfunctions "compile()" and "execfile()" that occur in a module "M"\ncontaining a future statement will, by default, use the new syntax or\nsemantics associated with the future statement. This can, starting\nwith Python 2.2 be controlled by optional arguments to "compile()" ---\nsee the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also: **PEP 236** - Back to the __future__\n\n The original proposal for the __future__ mechanism.\n',
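A short sketch of the binding behaviour of the two "import" forms and of dynamic import via "importlib.import_module()"; standard-library modules are used purely for illustration:

    >>> import math as m                         # binds the module object to the local name "m"
    >>> from math import pi, sqrt                # binds only the listed names locally
    >>> m.floor(pi)
    3.0
    >>> sqrt(16)
    4.0
    >>> import importlib
    >>> json = importlib.import_module('json')   # module chosen dynamically by name
    >>> json.__name__
    'json'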
'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section *Special method names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. Unicode and 8-bit strings are fully interoperable in\n this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "cmp([1,2,x], [1,2,y])" returns\n the same as "cmp(x,y)". If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, "[1,2] <\n [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership. "x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise. 
"x not in s" returns the negation of "x in s". The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object. However, it make sense\nfor many other object types to support membership tests without being\na sequence. In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that "x == y[i]" is true.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*. An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so """ in "abc"" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [7]\n',
'integers': u'\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n longinteger ::= integer ("l" | "L")\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"\n octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n nonzerodigit ::= "1"..."9"\n octdigit ::= "0"..."7"\n bindigit ::= "0" | "1"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case "\'l\'" and upper case "\'L\'" are allowed as\nsuffix for long integers, it is strongly recommended to always use\n"\'L\'", since the letter "\'l\'" looks too much like the digit "\'1\'".\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. [1] There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n 7 2147483647 0177\n 3L 79228162514264337593543950336L 0377L 0x100000000L\n 79228162514264337593543950336 0xdeadbeef\n',
'lambda': u'\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n old_lambda_expr ::= "lambda" [parameter_list]: old_expression\n\nLambda expressions (sometimes called lambda forms) have the same\nsyntactic position as expressions. They are a shorthand to create\nanonymous functions; the expression "lambda arguments: expression"\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n def name(arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements.\n',
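A brief illustration of the equivalence between a lambda expression and the corresponding "def" form, plus a typical use as a key function; the names are made up:

    >>> add = lambda a, b: a + b
    >>> add(2, 3)
    5
    >>> def add_def(a, b):          # the equivalent "def" form
    ...     return a + b
    ...
    >>> add_def(2, 3)
    5
    >>> sorted(['banana', 'fig', 'apple'], key=lambda s: len(s))
    ['fig', 'apple', 'banana']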
'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | list_comprehension] "]"\n list_comprehension ::= expression list_for\n list_for ::= "for" target_list "in" old_expression_list [list_iter]\n old_expression_list ::= old_expression [("," old_expression)+ [","]]\n old_expression ::= or_test | old_lambda_expr\n list_iter ::= list_for | list_if\n list_if ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one "for" clause and zero or\nmore "for" or "if" clauses. In this case, the elements of the new\nlist are those that would be produced by considering each of the "for"\nor "if" clauses a block, nesting from left to right, and evaluating\nthe expression to produce a list element each time the innermost block\nis reached [1].\n',
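A small example of a list display and of how a comprehension with several clauses corresponds to nested blocks, as described above:

    >>> [x * x for x in range(5)]
    [0, 1, 4, 9, 16]
    >>> [(x, y) for x in [1, 2] for y in 'ab' if y != 'b']
    [(1, 'a'), (2, 'a')]
    >>> result = []                          # the equivalent nested-block form
    >>> for x in [1, 2]:
    ...     for y in 'ab':
    ...         if y != 'b':
    ...             result.append((x, y))
    ...
    >>> result
    [(1, 'a'), (2, 'a')]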
'naming': u'\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function "execfile()" is a code block. The string argument\npassed to the built-in function "eval()" and to the "exec" statement\nis a code block. The expression read and evaluated by the built-in\nfunction "input()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, in the\nsecond position of an "except" clause header or after "as" in a "with"\nstatement. The "import" statement of the form "from ... import *"\nbinds all names defined in the imported module, except those beginning\nwith an underscore. This form may only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). 
It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a "SyntaxError".\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "__builtin__". The global namespace is searched first.\nIf the name is not found there, the builtins namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "__builtin__" (note: no\n\'s\'); when in any other module, "__builtins__" is an alias for the\ndictionary of the "__builtin__" module itself. "__builtins__" can be\nset to a user-created dictionary to create a weak form of restricted\nexecution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "__builtin__" (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. 
An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n',
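# Illustrative sketch of the binding rule described in the 'naming' topic above
# (hypothetical doctest, Python 2 semantics): an assignment anywhere in a block
# makes the name local for the whole block, so an earlier use raises
# UnboundLocalError rather than finding the global.
# >>> x = 10
# >>> def f():
# ...     print x      # 'x' is local here because of the assignment below
# ...     x = 20
# ...
# >>> f()
# Traceback (most recent call last):
#   ...
# UnboundLocalError: local variable 'x' referenced before assignment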
'numbers': u'\nNumeric literals\n****************\n\nThere are four types of numeric literals: plain integers, long\nintegers, floating point numbers, and imaginary numbers. There are no\ncomplex literals (complex numbers can be formed by adding a real\nnumber and an imaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n',
'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()" (described\n below). Note that "__pow__()" should be defined to accept an\n optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator ("/") is implemented by these methods. The\n "__truediv__()" method is used when "__future__.division" is in\n effect, otherwise "__div__()" is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, to execute the statement "x += y", where\n *x* is an instance of a class that has an "__iadd__()" method,\n "x.__iadd__(y)" is called. If *x* is an instance of a class that\n does not define a "__iadd__()" method, "x.__add__(y)" and\n "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions "complex()", "int()",\n "long()", and "float()". Should return a value of the appropriate\n type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions "oct()" and "hex()".\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement "operator.index()". Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or "None" if conversion is impossible. When\n the common type would be the type of "other", it is sufficient to\n return "None", since the interpreter will also ask the other object\n to attempt a coercion (but sometimes, if the implementation of the\n other type cannot be changed, it is useful to do the conversion to\n the other type here). A return value of "NotImplemented" is\n equivalent to returning "None".\n',
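# Illustrative sketch of the binary-operator protocol described in the
# 'numeric-types' topic above (hypothetical class, assuming CPython 2.7):
# __add__() handles the supported case and returns NotImplemented otherwise,
# which lets the interpreter fall back to the right operand's __radd__().
# >>> class Metres(object):
# ...     def __init__(self, n):
# ...         self.n = n
# ...     def __add__(self, other):
# ...         if isinstance(other, Metres):
# ...             return Metres(self.n + other.n)
# ...         return NotImplemented
# ...
# >>> (Metres(2) + Metres(3)).n
# 5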
'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. [1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The "type()" function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. (The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement provides a convenient way to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. 
In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. (Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n',
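# Illustrative sketch of the identity-versus-value distinction described in the
# 'objects' topic above (hypothetical doctest, assuming CPython 2.7): two
# separately created empty lists are equal in value but are distinct objects.
# >>> c = []; d = []
# >>> c == d, c is d
# (True, False)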
'operator-summary': u'\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "<>", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n| | [8] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [9] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "`expressions...`" | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks"\n the control variables of each "for" it contains into the\n containing scope. However, this behavior is deprecated, and\n relying on it will not work in Python 3.\n\n[2] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. 
For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for "floor(x/y)" to be one larger than "(x-x%y)/y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[4] While comparisons between unicode strings make sense at the\n byte level, they may be counter-intuitive to users. For example,\n the strings "u"\\u00C7"" and "u"\\u0043\\u0327"" compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using "unicodedata.normalize()".\n\n[5] The implementation computes this efficiently, without\n constructing lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of\n the sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to "{}".\n\n[7] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[8] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[9] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n',
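# Illustrative sketch of footnote [2] in the 'operator-summary' topic above
# (hypothetical doctest, assuming an IEEE 754 double platform as the footnote
# does): "%" keeps the sign of the right operand, math.fmod() that of the left.
# >>> -1e-100 % 1e100
# 1e+100
# >>> import math
# >>> math.fmod(-1e-100, 1e100)
# -1e-100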
'pass': u'\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n',
'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type. The result type is that of the\narguments after coercion.\n\nWith mixed operand types, the coercion rules for binary arithmetic\noperators apply. For int and long int operands, the result has the\nsame type as the operands (after coercion) unless the second argument\nis negative; in that case, all arguments are converted to float and a\nfloat result is delivered. For example, "10**2" returns "100", but\n"10**-2" returns "0.01". (This last feature was added in Python 2.2.\nIn Python 2.1 and before, if both arguments were of integer types and\nthe second argument was negative, an exception was raised).\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a\n"ValueError".\n',
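# Illustrative sketch of the precedence and coercion rules described in the
# 'power' topic above (hypothetical doctest, assuming CPython 2.7).
# >>> -1**2          # ** binds more tightly than unary minus
# -1
# >>> (-1)**2
# 1
# >>> 10**-2         # a negative exponent converts the result to float
# 0.01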
'print': u'\nThe "print" statement\n*********************\n\n print_stmt ::= "print" ([expression ("," expression)* [","]]\n | ">>" expression [("," expression)+ [","]])\n\n"print" evaluates each expression in turn and writes the resulting\nobject to standard output (see below). If an object is not a string,\nit is first converted to a string using the rules for string\nconversions. The (resulting or original) string is then written. A\nspace is written before each object is (converted and) written, unless\nthe output system believes it is positioned at the beginning of a\nline. This is the case (1) when no characters have yet been written\nto standard output, (2) when the last character written to standard\noutput is a whitespace character except "\' \'", or (3) when the last\nwrite operation on standard output was not a "print" statement. (In\nsome cases it may be functional to write an empty string to standard\noutput for this reason.)\n\nNote: Objects which act like file objects but which are not the\n built-in file objects often do not properly emulate this aspect of\n the file object\'s behavior, so it is best not to rely on this.\n\nA "\'\\n\'" character is written at the end, unless the "print" statement\nends with a comma. This is the only action if the statement contains\njust the keyword "print".\n\nStandard output is defined as the file object named "stdout" in the\nbuilt-in module "sys". If no such object exists, or if it does not\nhave a "write()" method, a "RuntimeError" exception is raised.\n\n"print" also has an extended form, defined by the second portion of\nthe syntax described above. This form is sometimes referred to as\n""print" chevron." In this form, the first expression after the ">>"\nmust evaluate to a "file-like" object, specifically an object that has\na "write()" method as described above. With this extended form, the\nsubsequent expressions are printed to this file object. If the first\nexpression evaluates to "None", then "sys.stdout" is used as the file\nfor output.\n',
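# Illustrative sketch of the extended "print chevron" form described in the
# 'print' topic above (hypothetical snippet, Python 2 syntax): the expression
# after ">>" selects the file-like object that receives the output.
# >>> import sys
# >>> print >> sys.stderr, 'warning: something happened'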
'raise': u'\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["," expression ["," expression]]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a "TypeError" exception is raised indicating that\nthis is an error (if running under IDLE, a "Queue.Empty" exception is\nraised instead).\n\nOtherwise, "raise" evaluates the expressions to get three objects,\nusing "None" as the value of omitted expressions. The first two\nobjects are used to determine the *type* and *value* of the exception.\n\nIf the first object is an instance, the type of the exception is the\nclass of the instance, the instance itself is the value, and the\nsecond object must be "None".\n\nIf the first object is a class, it becomes the type of the exception.\nThe second object is used to determine the exception value: If it is\nan instance of the class, the instance becomes the exception value. If\nthe second object is a tuple, it is used as the argument list for the\nclass constructor; if it is "None", an empty argument list is used,\nand any other object is treated as a single argument to the\nconstructor. The instance so created by calling the constructor is\nused as the exception value.\n\nIf a third object is present and not "None", it must be a traceback\nobject (see section *The standard type hierarchy*), and it is\nsubstituted instead of the current location as the place where the\nexception occurred. If the third object is present and not a\ntraceback object or "None", a "TypeError" exception is raised. The\nthree-expression form of "raise" is useful to re-raise an exception\ntransparently in an except clause, but "raise" with no expressions\nshould be preferred if the exception to be re-raised was the most\nrecently active exception in the current scope.\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n',
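# Illustrative sketch of the bare re-raise form described in the 'raise' topic
# above (hypothetical doctest, Python 2 semantics): "raise" with no expressions
# re-raises the exception that is active in the current scope.
# >>> try:
# ...     1 / 0
# ... except ZeroDivisionError:
# ...     raise        # re-raises the active ZeroDivisionError unchanged
# ...
# Traceback (most recent call last):
#   ...
# ZeroDivisionError: integer division or modulo by zero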
'return': u'\nThe "return" statement\n**********************\n\n return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement is not allowed to\ninclude an "expression_list". In that context, a bare "return"\nindicates that the generator is done and will cause "StopIteration" to\nbe raised.\n',
'sequence-types': u'\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilar to those for Python\'s standard dictionary objects. The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects. Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent of "has_key()"; for sequences,\nit should search through the values. It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__nonzero__()" method and whose "__len__()"\n method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. 
The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "iterkeys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\n New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n',
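# Illustrative sketch of the sequence protocol described in the
# 'sequence-types' topic above (hypothetical class, assuming CPython 2.7):
# __len__() plus an IndexError-raising __getitem__() are enough for iteration
# and for the fallback protocol used by the "in" operator.
# >>> class Squares(object):
# ...     def __init__(self, n):
# ...         self.n = n
# ...     def __len__(self):
# ...         return self.n
# ...     def __getitem__(self, i):
# ...         if not 0 <= i < self.n:
# ...             raise IndexError(i)
# ...         return i * i
# ...
# >>> len(Squares(4)), list(Squares(4)), 9 in Squares(4)
# (4, [0, 1, 4, 9], True)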
'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments. The\narguments are converted to a common type. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by "pow(2, n)". A\nleft shift by *n* bits is defined as multiplication with "pow(2, n)".\nNegative shift counts raise a "ValueError" exception.\n\nNote: In the current implementation, the right-hand operand is\n required to be at most "sys.maxsize". If the right-hand operand is\n larger than "sys.maxsize" an "OverflowError" exception is raised.\n',
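# Illustrative sketch of the shift definitions in the 'shifting' topic above
# (hypothetical doctest, assuming CPython 2.7): a left shift by n multiplies by
# pow(2, n) and a right shift by n divides by it.
# >>> 5 << 2, 20 >> 2
# (20, 5)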
'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. The syntax for a slicing:\n\n slicing ::= simple_slicing | extended_slicing\n simple_slicing ::= primary "[" short_slice "]"\n extended_slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice | ellipsis\n proper_slice ::= short_slice | long_slice\n short_slice ::= [lower_bound] ":" [upper_bound]\n long_slice ::= short_slice ":" [stride]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n ellipsis ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses). Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows. The primary must\nevaluate to a sequence object. The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n"sys.maxint", respectively. If either bound is negative, the\nsequence\'s length is added to it. The slicing now selects all items\nwith index *k* such that "i <= k < j" where *i* and *j* are the\nspecified lower and upper bounds. This may be an empty sequence. It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows. The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of an ellipsis slice\nitem is the built-in "Ellipsis" object. The conversion of a proper\nslice is a slice object (see section *The standard type hierarchy*)\nwhose "start", "stop" and "step" attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting "None" for missing expressions.\n',
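# Illustrative sketch of simple and extended slicing as described in the
# 'slicings' topic above (hypothetical doctest, assuming CPython 2.7).
# >>> s = 'abcdef'
# >>> s[1:4], s[-2:], s[::2]
# ('bcd', 'ef', 'ace')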
'specialattrs': u'\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\nobject.__methods__\n\n Deprecated since version 2.2: Use the built-in function "dir()" to\n get a list of an object\'s attributes. This attribute is no longer\n available.\n\nobject.__members__\n\n Deprecated since version 2.2: Use the built-in function "dir()" to\n get a list of an object\'s attributes. This attribute is no longer\n available.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each new-style class keeps a list of weak references to its\n immediate subclasses. This method returns a list of all those\n references still alive. Example:\n\n >>> int.__subclasses__()\n [<type \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n in the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property\n being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n singleton tuple whose only element is the tuple to be formatted.\n\n[6] The advantage of leaving the newline on is that returning an\n empty string is then an unambiguous EOF indication. It is also\n possible (in cases where it might matter, for example, if you want\n to make an exact copy of a file while scanning its lines) to tell\n whether the last line of a file ended in a newline or not (yes\n this happens!).\n',
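# Illustrative sketch of the class attributes listed in the 'specialattrs'
# topic above (hypothetical doctest, assuming CPython 2.7).
# >>> bool.__bases__
# (<type 'int'>,)
# >>> bool.__mro__
# (<type 'bool'>, <type 'int'>, <type 'object'>)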
'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "x.__getitem__(i)" for old-style\nclasses and "type(x).__getitem__(x, i)" for new-style classes. Except\nwhere mentioned, attempts to execute an operation raise an exception\nwhen no appropriate method is defined (typically "AttributeError" or\n"TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. 
It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". Circular references\n which are garbage are detected when the option cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the *-R* command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. 
This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if "self < other",\n zero if "self == other", a positive integer if "self > other". If\n no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n class instances are compared by object identity ("address"). See\n also the description of "__hash__()" for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by "__cmp__()" has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. 
using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n If a class does not define a "__cmp__()" or "__eq__()" method it\n should not define a "__hash__()" operation either; if it defines\n "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n not be usable in hashed collections. If a class defines mutable\n objects and implements a "__cmp__()" or "__eq__()" method, it\n should not implement "__hash__()", since hashable collection\n implementations require that a object\'s hash value is immutable (if\n the object\'s hash value changes, it will be in the wrong hash\n bucket).\n\n User-defined classes have "__cmp__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns a result derived from\n "id(x)".\n\n Classes which inherit a "__hash__()" method from a parent class but\n change the meaning of "__cmp__()" or "__eq__()" such that the hash\n value returned is no longer appropriate (e.g. by switching to a\n value-based concept of equality instead of the default identity\n based equality) can explicitly flag themselves as being unhashable\n by setting "__hash__ = None" in the class definition. Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. 
Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should not simply execute "self.name = value" --- this would cause\n a recursive call to itself. Instead, it should insert the value in\n the dictionary of instance attributes, e.g., "self.__dict__[name] =\n value". For new-style classes, rather than accessing the instance\n dictionary, it should call the base class method with the same\n name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. 
This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to a new-style object instance, "a.x" is transformed\n into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a new-style class, "A.x" is transformed into the\n call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. 
Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using "type()". A class\ndefinition is read into a separate namespace and the value of class\nname is bound to the result of "type(name, bases, dict)".\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of "type()". This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing\n the role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s "__new__()"\nmethod -- "type.__new__()" can then be called from this method to\ncreate a class with different properties. This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n def __new__(mcs, name, bases, dict):\n dict[\'foo\'] = \'metacls was here\'\n return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom "__call__()" method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for "name",\n "bases", and "dict". Upon class creation, the callable is used\n instead of the built-in "type()".\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If "dict[\'__metaclass__\']" exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored including logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also: **PEP 3119** - Introducing Abstract Base Classes\n\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilar to those for Python\'s standard dictionary objects. The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects. Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent of "has_key()"; for sequences,\nit should search through the values. It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__nonzero__()" method and whose "__len__()"\n method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. 
Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "iterkeys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\n New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. 
Immutable sequences methods should at most only\ndefine "__getslice__()"; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the "__getitem__()" method. (However, built-in types in CPython\n currently still implement "__getslice__()". Therefore, you have to\n override it in derived classes when implementing slicing.)\n\n Called to implement evaluation of "self[i:j]". The returned object\n should be of the same type as *self*. Note that missing *i* or *j*\n in the slice expression are replaced by zero or "sys.maxint",\n respectively. If negative indexes are used in the slice, the\n length of the sequence is added to that index. If the instance does\n not implement the "__len__()" method, an "AttributeError" is\n raised. No guarantee is made that indexes adjusted this way are not\n still negative. Indexes which are greater than the length of the\n sequence are not modified. If no "__getslice__()" is found, a slice\n object is created instead, and passed to "__getitem__()" instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to "self[i:j]". Same notes for *i*\n and *j* as for "__getslice__()".\n\n This method is deprecated. If no "__setslice__()" is found, or for\n extended slicing of the form "self[i:j:k]", a slice object is\n created, and passed to "__setitem__()", instead of "__setslice__()"\n being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of "self[i:j]". Same notes for *i* and\n *j* as for "__getslice__()". This method is deprecated. If no\n "__delslice__()" is found, or for extended slicing of the form\n "self[i:j:k]", a slice object is created, and passed to\n "__delitem__()", instead of "__delslice__()" being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. For slice\noperations involving extended slice notation, or in absence of the\nslice methods, "__getitem__()", "__setitem__()" or "__delitem__()" is\ncalled with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n"__getitem__()", "__setitem__()" and "__delitem__()" support slice\nobjects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to "max()"; these are necessary because of the handling\nof negative indices before the "__*slice__()" methods are called.\nWhen negative indexes are used, the "__*item__()" methods receive them\nas provided, but the "__*slice__()" methods get a "cooked" form of the\nindex values. For each negative index value, the length of the\nsequence is added to the index before calling the method (which may\nstill result in a negative index); this is the customary handling of\nnegative indexes by the built-in sequence types, and the "__*item__()"\nmethods are expected to do this as well. 
However, since they should\nalready be doing that, negative indexes cannot be passed in; they must\nbe constrained to the bounds of the sequence before being passed to\nthe "__*item__()" methods. Calling "max(0, i)" conveniently returns\nthe proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()" (described\n below). Note that "__pow__()" should be defined to accept an\n optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator ("/") is implemented by these methods. The\n "__truediv__()" method is used when "__future__.division" is in\n effect, otherwise "__div__()" is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, to execute the statement "x += y", where\n *x* is an instance of a class that has an "__iadd__()" method,\n "x.__iadd__(y)" is called. If *x* is an instance of a class that\n does not define a "__iadd__()" method, "x.__add__(y)" and\n "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions "complex()", "int()",\n "long()", and "float()". Should return a value of the appropriate\n type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions "oct()" and "hex()".\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement "operator.index()". Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or "None" if conversion is impossible. When\n the common type would be the type of "other", it is sufficient to\n return "None", since the interpreter will also ask the other object\n to attempt a coercion (but sometimes, if the implementation of the\n other type cannot be changed, it is useful to do the conversion to\n the other type here). A return value of "NotImplemented" is\n equivalent to returning "None".\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. 
Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from "object") never invoke the\n "__coerce__()" method in response to a binary operator; the only\n time "__coerce__()" is invoked is when the built-in function\n "coerce()" is called.\n\n* For most intents and purposes, an operator that returns\n "NotImplemented" is treated the same as one that is not implemented\n at all.\n\n* Below, "__op__()" and "__rop__()" are used to signify the generic\n method names corresponding to an operator; "__iop__()" is used for\n the corresponding in-place operator. For example, for the operator\n \'"+"\', "__add__()" and "__radd__()" are used for the left and right\n variant of the binary operator, and "__iadd__()" for the in-place\n variant.\n\n* For objects *x* and *y*, first "x.__op__(y)" is tried. If this is\n not implemented or returns "NotImplemented", "y.__rop__(x)" is\n tried. If this is also not implemented or returns "NotImplemented",\n a "TypeError" exception is raised. But see the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s "__rop__()" method, the right operand\'s "__rop__()"\n method is tried *before* the left operand\'s "__op__()" method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s "__op__()" method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is\n called before that type\'s "__op__()" or "__rop__()" method is\n called, but no sooner. If the coercion returns an object of a\n different type for the operand whose coercion is invoked, part of\n the process is redone using the new object.\n\n* When an in-place operator (like \'"+="\') is used, if the left\n operand implements "__iop__()", it is invoked without any coercion.\n When the operation falls back to "__op__()" and/or "__rop__()", the\n normal coercion rules apply.\n\n* In "x + y", if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In "x * y", if one operand is a sequence that implements sequence\n repetition, and the other is an integer ("int" or "long"), sequence\n repetition is invoked.\n\n* Rich comparisons (implemented by methods "__eq__()" and so on)\n never use coercion. Three-way comparison (implemented by\n "__cmp__()") does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types "int",\n "long", "float", and "complex" do not use coercion. All these types\n implement a "__coerce__()" method, for use by the built-in\n "coerce()" function.\n\n Changed in version 2.7: The complex type no longer makes implicit\n calls to the "__coerce__()" method for mixed-type binary arithmetic\n operations.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. 
Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n"x.__getitem__(i)" or implicitly as in "x[i]".\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. 
If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n',
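The 'specialnames' entry quoted above ends here. As a minimal illustrative sketch of the descriptor protocol it describes (added alongside the dump, not part of the quoted documentation; the Positive and Account names are invented for illustration, and new-style classes are assumed), a data descriptor could look like this:

    class Positive(object):
        # Data descriptor that rejects non-positive values.
        def __init__(self, name):
            self.name = name              # key used in the instance __dict__
        def __get__(self, instance, owner):
            if instance is None:          # attribute accessed on the class itself
                return self
            return instance.__dict__[self.name]
        def __set__(self, instance, value):
            if value <= 0:
                raise ValueError("%s must be positive" % self.name)
            instance.__dict__[self.name] = value

    class Account(object):
        balance = Positive('balance')     # descriptor stored in the class dict

    acct = Account()
    acct.balance = 10                     # routed through Positive.__set__
    assert acct.balance == 10             # routed through Positive.__get__

Because Positive defines both __get__() and __set__(), it is a data descriptor, so it takes precedence over a same-named entry in an instance dictionary, as described in the "Invoking Descriptors" section above.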
'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange* section. To output formatted strings use\ntemplate strings or the "%" operator described in the *String\nFormatting Operations* section. Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n "\'strict\'", meaning that encoding errors raise "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'" and any other\n name registered via "codecs.register_error()", see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'",\n "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n registered via "codecs.register_error()", see section *Codec Base\n Classes*. For a list of possible encodings, see section *Standard\n Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n "\'backslashreplace\'" and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. 
(The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" if *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the "%" formatting described in *String\n Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. 
The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example, "\' 1 2 3 \'.split()" returns "[\'1\', \'2\', \'3\']", and\n "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 \']".\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n c\', \'\', \'de fg\', \'kl\']", while the same call with\n "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the "maketrans()" helper function in the "string"\n module to create a translation table. For string objects, set the\n *table* argument to "None" for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a "None" *table* argument.\n\n For Unicode objects, the "translate()" method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or "None". Unmapped characters\n are left untouched. Characters mapped to "None" are deleted. Note,\n a more flexible approach is to create a custom character mapping\n codec using the "codecs" module (see "encodings.cp1251" for an\n example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to "len(s)".\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return "True" if there are only numeric characters in S, "False"\n otherwise. Numeric characters include digit characters, and all\n characters that have the Unicode numeric value property, e.g.\n U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return "True" if there are only decimal characters in S, "False"\n otherwise. Decimal characters include digit characters, and all\n characters that can be used to form decimal-radix numbers, e.g.\n U+0660, ARABIC-INDIC DIGIT ZERO.\n',
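As a small complement to the 'string-methods' entry above (an illustrative sketch, not part of the quoted documentation), the following lines exercise a handful of the methods it describes:

    s = '  www.example.com  '
    assert s.strip() == 'www.example.com'
    assert s.strip().lstrip('w.') == 'example.com'      # strips any leading 'w' or '.'
    assert 'a,b,,c'.split(',') == ['a', 'b', '', 'c']   # empty fields are kept
    assert 'key=value'.partition('=') == ('key', '=', 'value')
    assert '{0} + {1} = {2}'.format(1, 2, 3) == '1 + 2 = 3'
    assert '42'.zfill(5) == '00042'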
'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n | "b" | "B" | "br" | "Br" | "bR" | "BR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n escapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section *Encoding declarations*.\n\nIn plain English: String literals can be enclosed in matching single\nquotes ("\'") or double quotes ("""). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). The backslash ("\\")\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter "\'r\'" or\n"\'R\'"; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of "\'u\'" or\n"\'U\'" makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. A prefix of "\'b\'" or "\'B\'" is ignored in\nPython 2; it indicates that the literal should become a bytes literal\nin Python 3 (e.g. when code is automatically converted with 2to3). A\n"\'u\'" or "\'b\'" prefix may be followed by an "\'r\'" prefix.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\newline" | Ignored | |\n+-------------------+-----------------------------------+---------+\n| "\\\\" | Backslash ("\\") | |\n+-------------------+-----------------------------------+---------+\n| "\\\'" | Single quote ("\'") | |\n+-------------------+-----------------------------------+---------+\n| "\\"" | Double quote (""") | |\n+-------------------+-----------------------------------+---------+\n| "\\a" | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| "\\b" | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| "\\f" | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| "\\n" | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| "\\N{name}" | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\r" | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| "\\t" | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\v" | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (3,5) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can\n be encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default).\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the\n byte with the given value; it is not necessary that the byte\n encodes a character in the source character set. In a Unicode\n literal, these escapes denote a Unicode character with the given\n value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an "\'r\'" or "\'R\'" prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. 
For example, the string literal\n"r"\\n"" consists of two characters: a backslash and a lowercase "\'n\'".\nString quotes can be escaped with a backslash, but the backslash\nremains in the string; for example, "r"\\""" is a valid string literal\nconsisting of two characters: a backslash and a double quote; "r"\\""\nis not a valid string literal (even a raw string cannot end in an odd\nnumber of backslashes). Specifically, *a raw string cannot end in a\nsingle backslash* (since the backslash would escape the following\nquote character). Note also that a single backslash followed by a\nnewline is interpreted as those two characters as part of the string,\n*not* as a line continuation.\n\nWhen an "\'r\'" or "\'R\'" prefix is used in conjunction with a "\'u\'" or\n"\'U\'" prefix, then the "\\uXXXX" and "\\UXXXXXXXX" escape sequences are\nprocessed while *all other backslashes are left in the string*. For\nexample, the string literal "ur"\\u0062\\n"" consists of three Unicode\ncharacters: \'LATIN SMALL LETTER B\', \'REVERSE SOLIDUS\', and \'LATIN\nSMALL LETTER N\'. Backslashes can be escaped with a preceding\nbackslash; however, both remain in the string. As a result, "\\uXXXX"\nescape sequences are only recognized when there are an odd number of\nbackslashes.\n',
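A brief illustrative sketch of the escape-sequence and raw-string rules described in the 'strings' entry above (added for reference, not part of the quoted text):

    assert len('\n') == 1        # escape sequence: one linefeed character
    assert len(r'\n') == 2       # raw string: a backslash followed by 'n'
    assert '\x41' == 'A'         # hexadecimal escape
    assert '\101' == 'A'         # octal escape
    assert u'\u0041' == u'A'     # Unicode escape (Unicode literals only)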
'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer. If this value is negative, the length of the sequence\nis added to it (so that, e.g., "x[-1]" selects the last item of "x".)\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero).\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n',
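To illustrate the 'subscriptions' entry above, a minimal sketch of sequence and mapping subscription (the variable names are invented for illustration):

    seq = ['a', 'b', 'c']
    assert seq[0] == 'a'
    assert seq[-1] == 'c'            # len(seq) is added to a negative index
    mapping = {'x': 1, (1, 2): 'pair'}
    assert mapping['x'] == 1
    assert mapping[1, 2] == 'pair'   # the expression list forms a tuple key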
'truth': u'\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0L", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__nonzero__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. (Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n',
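# Illustrative aside, assuming Python 2 semantics (the topic above names
# "__nonzero__"; Python 3 spells it "__bool__"): the listed false values,
# plus a hypothetical class whose truth value comes from "__len__".
assert not any([bool(None), bool(False), bool(0), bool(0.0), bool(0j),
                bool(''), bool(()), bool([]), bool({})])

class _Empty(object):
    def __len__(self):
        return 0

assert not _Empty()                      # __len__() returning zero makes it false
assert (0 or 'fallback') == 'fallback'   # "or" returns one of its operands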
'try': u'\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)". Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. 
The "finally" clause is executed. If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n',
'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "Ellipsis". It is used to indicate the presence of the "..." syntax\n in a slice. Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception "OverflowError" is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. 
The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these are\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex"\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions "chr()" and "ord()" convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. 
The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions "chr()" and "ord()" implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in "sys.maxunicode", and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions "unichr()" and\n "ord()" convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method "encode()" and the built-\n in function "unicode()".\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in "frozenset()" constructor. 
As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section *Dictionary displays*).\n\n The extension modules "dbm", "gdbm", and "bsddb" provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | "__doc__" "func_doc" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | "__name__" "func_name" | The function\'s name. | Writable |\n +-------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | "func_defaults" | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value. | |\n +-------------------------+---------------------------------+-------------+\n | "__code__" "func_code" | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | "func_globals" | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +-------------------------+---------------------------------+-------------+\n | "__dict__" "func_dict" | The namespace supporting | Writable |\n | | arbitrary function attributes. 
| |\n +-------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | "func_closure" | contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: "func_name" is now writable.\n\n Changed in version 2.6: The double-underscore attributes\n "__closure__", "__code__", "__defaults__", and "__globals__"\n were introduced as aliases for the corresponding "func_*"\n attributes for forwards compatibility with Python 3.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or "None") and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: "im_self" is the class instance\n object, "im_func" is the function object; "im_class" is the\n class of "im_self" for bound methods or the class that asked for\n the method for unbound methods; "__doc__" is the method\'s\n documentation (same as "im_func.__doc__"); "__name__" is the\n method name (same as "im_func.__name__"); "__module__" is the\n name of the module the method was defined in, or "None" if\n unavailable.\n\n Changed in version 2.2: "im_self" used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For Python 3 forward-compatibility,\n "im_func" is also available as "__func__", and "im_self" as\n "__self__".\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its "im_self"\n attribute is "None" and the method object is said to be unbound.\n When one is created by retrieving a user-defined function object\n from a class via one of its instances, its "im_self" attribute\n is the instance, and the method object is said to be bound. 
In\n either case, the new method\'s "im_class" attribute is the class\n from which the retrieval takes place, and its "im_func"\n attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "im_func"\n attribute of the new instance is not the original method object\n but its "im_func" attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its "im_self"\n attribute is the class itself, and its "im_func" attribute is\n the function object underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function ("im_func") is called, with the restriction\n that the first argument must be an instance of the proper class\n ("im_class") or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function ("im_func") is called, inserting the class\n instance ("im_self") in front of the argument list. For\n instance, when "C" is a class which contains a definition for a\n function "f()", and "x" is an instance of "C", calling "x.f(1)"\n is equivalent to calling "C.f(x, 1)".\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in "im_self" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "next()" method will cause the function to\n execute until it provides a value using the "yield" statement.\n When the function executes a "return" statement or falls off the\n end, a "StopIteration" exception is raised and the iterator will\n have reached the end of the set of values to be returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. 
Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override "__new__()". The arguments of the call are passed to\n "__new__()" and, in the typical case, to "__init__()" to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s "__init__()"\n method if it has one. Any arguments are passed on to the\n "__init__()" method. If there is no "__init__()" method, the\n class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a "__call__()" method;\n "x(arguments)" is a shorthand for "x.__call__(arguments)".\n\nModules\n Modules are imported by the "import" statement (see section *The\n import statement*). A module object has a namespace implemented by\n a dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., "m.x" is equivalent to "m.__dict__["x"]". A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute is not present for C modules that are statically linked\n into the interpreter; for extension modules loaded dynamically from\n a shared library, it is the pathname of the shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section *Class definitions*). A class has a namespace\n implemented by a dictionary object. 
Class attribute references are\n translated to lookups in this dictionary, e.g., "C.x" is translated\n to "C.__dict__["x"]" (although for new-style classes in particular\n there are a number of hooks which allow for other means of locating\n attributes). When the attribute name is not found there, the\n attribute search continues in the base classes. For old-style\n classes, the search is depth-first, left-to-right in the order of\n occurrence in the base class list. New-style classes use the more\n complex C3 method resolution order which behaves correctly even in\n the presence of \'diamond\' inheritance structures where there are\n multiple inheritance paths leading back to a common ancestor.\n Additional details on the C3 MRO used by new-style classes can be\n found in the documentation accompanying the 2.3 release at\n https://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n user-defined function object or an unbound user-defined method\n object whose associated class is either "C" or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose "im_class" attribute is "C". When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose "im_self" attribute is "C". When it would\n yield a static method object, it is transformed into the object\n wrapped by the static method object. See section *Implementing\n Descriptors* for another way in which attributes retrieved from a\n class may differ from those actually contained in its "__dict__"\n (note that only new-style classes support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or None if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it "C") of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n "im_class" attribute is "C" and whose "im_self" attribute is the\n instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class "C"; see\n above under "Classes". See section *Implementing Descriptors* for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s "__dict__". 
If no class attribute is found, and the\n object\'s class has a "__getattr__()" method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the "open()" built-in function, and also by "os.popen()",\n "os.fdopen()", and the "makefile()" method of socket objects (and\n perhaps by other functions or methods provided by extension\n modules). The objects "sys.stdin", "sys.stdout" and "sys.stderr"\n are initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams. See *File Objects* for\n complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" is to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names;\n "f_restricted" is a flag indicating whether the function is\n executing in restricted execution mode; "f_lasti" gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_exc_type", "f_exc_value",\n "f_exc_traceback" represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); "f_lineno" is\n the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). 
A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as "sys.exc_traceback",\n and also as the third item of the tuple returned by\n "sys.exc_info()". The latter is the preferred interface, since\n it works correctly when the program is using multiple threads.\n When the program contains no suitable handler, the stack trace\n is written (nicely formatted) to the standard error stream; if\n the interpreter is interactive, it is also made available to the\n user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., "a[i:j:step]",\n "a[i:j, k:l]", or "a[..., i:j]". They are also created by the\n built-in "slice()" function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". 
Class method objects are created\n by the built-in "classmethod()" constructor.\n',
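# Illustrative aside (hypothetical function): a few of the special function
# and code-object attributes listed in the type hierarchy above, using the
# double-underscore aliases available from Python 2.6 onward.
def _scale(x, factor=2):
    return x * factor

assert _scale.__name__ == '_scale'
assert _scale.__defaults__ == (2,)
assert _scale.__code__.co_argcount == 2
assert _scale.__code__.co_varnames[:2] == ('x', 'factor')

_scale.units = 'arbitrary'         # user-defined functions accept arbitrary attributes
assert _scale.__dict__['units'] == 'arbitrary'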
'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n',
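# Illustrative aside: the two flavors of function object mentioned above can
# be told apart with the "types" module (hypothetical example names).
import types

def _user_defined():
    return None

assert isinstance(_user_defined, types.FunctionType)
assert isinstance(len, types.BuiltinFunctionType)
assert _user_defined() is None     # the only operation on a function: calling it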
'typesmapping': u'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a "KeyError" if\n *key* is not in the map.\n\n New in version 2.5: If a subclass of dict defines a method\n "__missing__()", if the key *key* is not present, the "d[key]"\n operation calls that method with the key *key* as argument. 
The\n "d[key]" operation then returns or raises whatever is returned\n or raised by the "__missing__(key)" call if the key is not\n present. No other operations or methods invoke "__missing__()".\n If "__missing__()" is not defined, "KeyError" is raised.\n "__missing__()" must be a method; it cannot be an instance\n variable. For an example, see "collections.defaultdict".\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to "not key in d".\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iterkeys()".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. "has_key()"\n is deprecated in favor of "key in d".\n\n items()\n\n Return a copy of the dictionary\'s list of "(key, value)" pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If "items()", "keys()", "values()", "iteritems()", "iterkeys()",\n and "itervalues()" are called with no intervening modifications\n to the dictionary, the lists will directly correspond. This\n allows the creation of "(value, key)" pairs using "zip()":\n "pairs = zip(d.values(), d.keys())". The same relationship\n holds for the "iterkeys()" and "itervalues()" methods: "pairs =\n zip(d.itervalues(), d.iterkeys())" provides the same value for\n "pairs". Another way to create the same list is "pairs = [(v, k)\n for (k, v) in d.iteritems()]".\n\n iteritems()\n\n Return an iterator over the dictionary\'s "(key, value)" pairs.\n See the note for "dict.items()".\n\n Using "iteritems()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. See the note for\n "dict.items()".\n\n Using "iterkeys()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for "dict.items()".\n\n Using "itervalues()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for "dict.items()".\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. 
If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. See the note\n for "dict.items()".\n\n viewitems()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.viewkeys()", "dict.viewvalues()" and\n"dict.viewitems()" are *view objects*. They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) 
Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n',
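# Illustrative aside (hypothetical dict subclass): the "__missing__" hook
# described in the mapping topic above, which "d[key]" consults when the key
# is absent from a dict subclass.
class _CountingDict(dict):
    def __missing__(self, key):
        self[key] = 0
        return 0

_counts = _CountingDict()
_counts['spam'] += 1               # missing key: __missing__ supplies the 0
assert _counts == {'spam': 1}
assert _counts.setdefault('eggs', 7) == 7 and _counts['eggs'] == 7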
'typesmethods': u'\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. Built-in methods are described with the\ntypes that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: "m.im_self" is the object on which the method\noperates, and "m.im_func" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)".\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its "im_self" attribute will\nbe "None" and if called, an explicit "self" object must be passed as\nthe first argument. In this case, "self" must be an instance of the\nunbound method\'s class (or a subclass of that class), otherwise a\n"TypeError" is raised.\n\nLike function objects, methods objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.im_func"), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set an attribute on a method results in an\n"AttributeError" being raised. In order to set a method attribute,\nyou need to explicitly set it on the underlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n AttributeError: \'instancemethod\' object has no attribute \'whoami\'\n >>> c.method.im_func.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n',
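# Illustrative aside, Python 2 only (unbound methods and the "im_*"
# attributes do not exist in Python 3): bound versus unbound access on a
# hypothetical class.
class _Greeter:
    def hello(self):
        return 'hello'

_g = _Greeter()
assert _Greeter.hello.im_self is None          # unbound: no instance attached
assert _g.hello.im_self is _g                  # bound: the instance is stored
assert _g.hello.im_func is _Greeter.hello.im_func
assert _Greeter.hello(_g) == 'hello'           # explicit "self" for the unbound form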
'typesmodules': u'\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "<module\n\'sys\' (built-in)>". If loaded from a file, they are written as\n"<module \'os\' from \'/usr/local/lib/pythonX.Y/os.pyc\'>".\n',
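# Illustrative aside (the standard "math" module is used purely as an
# example): attribute access on a module is a lookup in its "__dict__", and
# attribute assignment updates that same namespace dictionary.
import math

assert math.__dict__['pi'] == math.pi
math._example_flag = True                      # hypothetical attribute
assert math.__dict__['_example_flag'] is True
del math._example_flag                         # remove the hypothetical attribute again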
'typesseq': u'\nSequence Types --- "str", "unicode", "list", "tuple", "bytearray", "buffer", "xrange"\n*************************************************************************************\n\nThere are seven sequence types: strings, Unicode strings, lists,\ntuples, bytearrays, buffers, and xrange objects.\n\nFor other containers see the built in "dict" and "set" classes, and\nthe "collections" module.\n\nString literals are written in single or double quotes: "\'xyzzy\'",\n""frobozz"". See *String literals* for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding "\'u\'" character: "u\'abc\'", "u"def"". In addition to\nthe functionality described here, there are also string-specific\nmethods described in the *String Methods* section. Lists are\nconstructed with square brackets, separating items with commas: "[a,\nb, c]". Tuples are constructed by the comma operator (not within\nsquare brackets), with or without enclosing parentheses, but an empty\ntuple must have the enclosing parentheses, such as "a, b, c" or "()".\nA single item tuple must have a trailing comma, such as "(d,)".\n\nBytearray objects are created with the built-in function\n"bytearray()".\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function "buffer()". They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n"xrange()" function. They don\'t support slicing, concatenation or\nrepetition, and using "in", "not in", "min()" or "max()" on them is\ninefficient.\n\nMost sequence types support the following operations. The "in" and\n"not in" operations have the same priorities as the comparison\noperations. The "+" and "*" operations have the same priority as the\ncorresponding numeric operations. 
[3] Additional methods are provided\nfor *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority.\nIn the table, *s* and *t* are sequences of the same type; *n*, *i* and\n*j* are integers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+--------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+--------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| "s * n, n * s" | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+--------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n| "s.index(x)" | index of the first occurrence of | |\n| | *x* in *s* | |\n+--------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the "in" and "not\n in" operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are (pointers\n to) this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. 
But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. **CPython implementation detail:** If *s* and *t* are both\n strings, some Python implementations such as CPython can usually\n perform an in-place optimization for assignments of the form "s = s\n + t" or "s += t". When applicable, this optimization makes\n quadratic run-time much less likely. This optimization is both\n version and implementation dependent. For performance sensitive\n code, it is preferable to use the "str.join()" method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange* section. To output formatted strings use\ntemplate strings or the "%" operator described in the *String\nFormatting Operations* section. Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n "\'strict\'", meaning that encoding errors raise "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'" and any other\n name registered via "codecs.register_error()", see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. 
The default for *errors* is\n "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'",\n "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n registered via "codecs.register_error()", see section *Codec Base\n Classes*. For a list of possible encodings, see section *Standard\n Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n "\'backslashreplace\'" and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" if *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. 
Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the "%" formatting described in *String\n Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. 
If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example, "\' 1 2 3 \'.split()" returns "[\'1\', \'2\', \'3\']", and\n "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 \']".\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n c\', \'\', \'de fg\', \'kl\']", while the same call with\n "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the "maketrans()" helper function in the "string"\n module to create a translation table. 
For string objects, set the\n *table* argument to "None" for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a "None" *table* argument.\n\n For Unicode objects, the "translate()" method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or "None". Unmapped characters\n are left untouched. Characters mapped to "None" are deleted. Note,\n a more flexible approach is to create a custom character mapping\n codec using the "codecs" module (see "encodings.cp1251" for an\n example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to "len(s)".\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return "True" if there are only numeric characters in S, "False"\n otherwise. Numeric characters include digit characters, and all\n characters that have the Unicode numeric value property, e.g.\n U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return "True" if there are only decimal characters in S, "False"\n otherwise. Decimal characters include digit characters, and all\n characters that can be used to form decimal-radix numbers, e.g.\n U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the "%"\noperator (modulo). This is also known as the string *formatting* or\n*interpolation* operator. Given "format % values" (where *format* is\na string or Unicode object), "%" conversion specifications in *format*\nare replaced with zero or more elements of *values*. The effect is\nsimilar to the using "sprintf()" in the C language. If *format* is a\nUnicode object, or if any of the objects being converted using the\n"%s" conversion are Unicode objects, the result will also be a Unicode\nobject.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [5] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The "\'%\'" character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence\n of characters (for example, "(somename)").\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an "\'*\'"\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. 
Precision (optional), given as a "\'.\'" (dot) followed by the\n precision. If specified as "\'*\'" (an asterisk), the actual width\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the "\'%\'" character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(number)03d quote types.\' % \\\n... {"language": "Python", "number": 2}\nPython has 002 quote types.\n\nIn this case no "*" specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| "\'#\'" | The value conversion will use the "alternate form" (where defined |\n| | below). |\n+-----------+-----------------------------------------------------------------------+\n| "\'0\'" | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| "\'-\'" | The converted value is left adjusted (overrides the "\'0\'" conversion |\n| | if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| "\' \'" | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| "\'+\'" | A sign character ("\'+\'" or "\'-\'") will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier ("h", "l", or "L") may be present, but is ignored as\nit is not necessary for Python -- so e.g. "%ld" is identical to "%d".\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| "\'d\'" | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'i\'" | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'o\'" | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| "\'u\'" | Obsolete type -- it is identical to "\'d\'". | (7) |\n+--------------+-------------------------------------------------------+---------+\n| "\'x\'" | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| "\'X\'" | Signed hexadecimal (uppercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| "\'e\'" | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'E\'" | Floating point exponential format (uppercase). 
| (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'f\'" | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'F\'" | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'g\'" | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'G\'" | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'c\'" | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| "\'r\'" | String (converts any Python object using *repr()*). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| "\'s\'" | String (converts any Python object using "str()"). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| "\'%\'" | No argument is converted, results in a "\'%\'" | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. The alternate form causes a leading zero ("\'0\'") to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading "\'0x\'" or "\'0X\'" (depending\n on whether the "\'x\'" or "\'X\'" format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. The "%r" conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a "unicode" string, the\n resulting string will also be "unicode".\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, "%s" conversions do not\nassume that "\'\\0\'" is the end of the string.\n\nChanged in version 2.7: "%f" conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by "%g" conversions.\n\nAdditional string operations are defined in standard modules "string"\nand "re".\n\n\nXRange Type\n===========\n\nThe "xrange" type is an immutable sequence which is commonly used for\nlooping. The advantage of the "xrange" type is that an "xrange"\nobject will always take the same amount of memory, no matter the size\nof the range it represents. 
There are no consistent performance\nadvantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the "len()" function.\n\n\nMutable Sequence Types\n======================\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | same as "s[len(s):len(s)] = [x]" | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)" | same as "s[len(s):len(s)] = x" | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)" | return number of *i*\'s for which | |\n| | "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])" | return smallest *k* such that | (4) |\n| | "s[k] == x" and "i <= k < j" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | same as "s[i:i] = [x]" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | same as "x = s[i]; del s[i]; | (6) |\n| | return x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | same as "del s[s.index(x)]" | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])" | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted\n multiple parameters and implicitly joined them into a tuple; this\n no longer works in Python 2.0. Use of this misfeature has been\n deprecated since Python 1.4.\n\n3. 
*x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n negative index is passed as the second or third parameter to the\n "index()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, "index()" didn\'t have arguments\n for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n "insert()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: "cmp=lambda x,y:\n cmp(x.lower(), y.lower())". The default value is "None".\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: "key=str.lower". The\n default value is "None".\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n "functools.cmp_to_key()" to convert an old-style *cmp* function to\n a *key* function.\n\n Changed in version 2.3: Support for "None" as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n be stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being\n sorted, the effect of attempting to mutate, or even inspect, the\n list is undefined. The C implementation of Python 2.3 and newer\n makes the list appear empty for the duration, and raises\n "ValueError" if it can detect that the list has been mutated\n during a sort.\n',
'typesseq-mutable': u'\nMutable Sequence Types\n**********************\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | same as "s[len(s):len(s)] = [x]" | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)" | same as "s[len(s):len(s)] = x" | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)" | return number of *i*\'s for which | |\n| | "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])" | return smallest *k* such that | (4) |\n| | "s[k] == x" and "i <= k < j" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | same as "s[i:i] = [x]" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | same as "x = s[i]; del s[i]; | (6) |\n| | return x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | same as "del s[s.index(x)]" | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])" | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted\n multiple parameters and implicitly joined them into a tuple; this\n no longer works in Python 2.0. Use of this misfeature has been\n deprecated since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. 
When a\n negative index is passed as the second or third parameter to the\n "index()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, "index()" didn\'t have arguments\n for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n "insert()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: "cmp=lambda x,y:\n cmp(x.lower(), y.lower())". The default value is "None".\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: "key=str.lower". The\n default value is "None".\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n "functools.cmp_to_key()" to convert an old-style *cmp* function to\n a *key* function.\n\n Changed in version 2.3: Support for "None" as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n be stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being\n sorted, the effect of attempting to mutate, or even inspect, the\n list is undefined. The C implementation of Python 2.3 and newer\n makes the list appear empty for the duration, and raises\n "ValueError" if it can detect that the list has been mutated\n during a sort.\n',
'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\nplain or long integer argument. The bitwise inversion of "x" is\ndefined as "-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n',
'while': u'\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n',
'with': u'\nThe "with" statement\n********************\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n "with_statement" feature has been enabled. It is always enabled in\n Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n',
'yield': u'\nThe "yield" statement\n*********************\n\n yield_stmt ::= yield_expression\n\nThe "yield" statement is only used when defining a generator function,\nand is only used in the body of the generator function. Using a\n"yield" statement in a function definition is sufficient to cause that\ndefinition to create a generator function instead of a normal\nfunction.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the generator\'s "next()"\nmethod repeatedly until it raises an exception.\n\nWhen a "yield" statement is executed, the state of the generator is\nfrozen and the value of "expression_list" is returned to "next()"\'s\ncaller. By "frozen" we mean that all local state is retained,\nincluding the current bindings of local variables, the instruction\npointer, and the internal evaluation stack: enough information is\nsaved so that the next time "next()" is invoked, the function can\nproceed exactly as if the "yield" statement were just another external\ncall.\n\nAs of Python version 2.5, the "yield" statement is now allowed in the\n"try" clause of a "try" ... "finally" construct. If the generator is\nnot resumed before it is finalized (by reaching a zero reference count\nor by being garbage collected), the generator-iterator\'s "close()"\nmethod will be called, allowing any pending "finally" clauses to\nexecute.\n\nFor full details of "yield" semantics, refer to the *Yield\nexpressions* section.\n\nNote: In Python 2.2, the "yield" statement was only allowed when the\n "generators" feature has been enabled. This "__future__" import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also: **PEP 0255** - Simple Generators\n\n The proposal for adding generators and the "yield" statement to\n Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing "yield" to appear inside a "try" ... "finally" block.\n'}
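A minimal, illustrative generator sketch (Python 2 style, to match the 'yield' topic above; the names are made up) showing the frozen-state/resume behaviour and the pending "finally" clause running on "close()":

def countdown(n):
    try:
        while n > 0:
            yield n              # execution freezes here; all local state is retained
            n -= 1
    finally:
        print 'cleanup'          # pending finally runs when the generator is exhausted or close()d

gen = countdown(2)
print gen.next()                 # -> 2
print gen.next()                 # -> 1
gen.close()                      # raises GeneratorExit inside the generator, so 'cleanup' is printed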
| 5,069
| 54,147
| 0.689044
|
ed3638b5db06066df7dcf34326cbc03d4c033fc5
| 2,935
|
py
|
Python
|
tasks.py
|
chen19901225/cqh_file_watcher
|
48e0c177934a836551851e619926096074d64353
|
[
"MIT"
] | null | null | null |
tasks.py
|
chen19901225/cqh_file_watcher
|
48e0c177934a836551851e619926096074d64353
|
[
"MIT"
] | null | null | null |
tasks.py
|
chen19901225/cqh_file_watcher
|
48e0c177934a836551851e619926096074d64353
|
[
"MIT"
] | null | null | null |
import os
import json
import re
from invoke import task
proj_name = 'cqh_file_watcher'
proj_dir = os.path.dirname(os.path.abspath(__file__))
python = os.path.join(proj_dir, 'venv/bin/python')
history_path = os.path.join(proj_dir, '.history')
def get_branch_name(c):
    """Return the currently checked-out git branch, falling back to 'master'."""
out = c.run("git branch")
# print(out)
lines = out.stdout.splitlines()
if not lines:
return 'master'
current_branch_line = [ele for ele in lines if ele.startswith("*")][0]
print(current_branch_line)
current_branch = re.split(r"\s+", current_branch_line)[-1]
return current_branch
def get_line_args(kwargs):
    """Render a mapping as ansible-playbook '-e key=value' extra-vars arguments."""
line = []
for (key, value) in kwargs.items():
line.append(f'-e {key}={value}')
return ' '.join(line)
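# Illustrative example (hypothetical values): get_line_args({'nginx_port': 4002, 'proj_name': 'demo'})
# returns "-e nginx_port=4002 -e proj_name=demo", i.e. the extra-vars flags passed to ansible-playbook.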
def get_file_version(name):
    """Read the entry for `name` from the .history JSON file."""
# print('get_version')
with open(history_path, 'r') as f:
content = f.read()
print('get_version:{}'.format(content))
return json.loads(content)[name]
def save_file_version(name, version_list):
    """Update the entry for `name` in the .history JSON file and write it back."""
old = None
with open(history_path, 'r') as f:
old = json.loads(f.read())
old[name] = version_list
with open(history_path, 'w') as f:
f.write(json.dumps(old))
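# The .history file is assumed to be a small JSON object keyed by name, e.g.
# {"cqh_file_watcher": ["0.1.0", "0.1.1"]} (illustrative shape only); get_file_version reads one
# entry and save_file_version rewrites that entry in place.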
def get_base_kwargs():
    """Return the variables shared by every deployment: project dir, project name and venv python."""
return dict(
proj_dir=proj_dir,
proj_name=proj_name,
python=python
)
def get_local_kwargs(**kwargs):
    """Extend the base variables with the local server name, cookie expiry and nginx port."""
d = get_base_kwargs()
d.update(**kwargs)
d.update(server_name='{}_local'.format(proj_name),
cookie_expires=86400,
nginx_port=4002)
for (key, value) in d.items():
print(f'key={key},value={value}')
return d
@task
def deploy_local(c):
    """Run playbooks/deploy_local.yaml with the local deployment variables."""
# deploy_tag = 'local'
kwargs = get_local_kwargs()
line_kwargs = get_line_args(kwargs)
ansible_cmd = f'ansible-playbook {proj_dir}/playbooks/deploy_local.yaml {line_kwargs}'
print('ansible_cmd:{}'.format(ansible_cmd))
c.run(ansible_cmd)
@task
def copy_files(c):
    """Run playbooks/copy-files.yaml with the local deployment variables."""
kwargs = get_local_kwargs()
line_kwargs = get_line_args(kwargs)
ansible_cmd = f"ansible-playbook {proj_dir}/playbooks/copy-files.yaml {line_kwargs}"
c.run(ansible_cmd)
@task
def c_push(c):
    """Push the current branch to origin."""
branch_name = get_branch_name(c)
c.run("git push origin {}".format(branch_name))
@task
def build(c):
    """Regenerate conf.py, remove stale build artifacts, then build sdist and wheel distributions."""
generate(c)
names = [
"build",
"dist",
f"{proj_name}.egg-info"
]
for name in names:
abspath = os.path.join(proj_dir, name)
if os.path.exists(abspath):
print("remove {}".format(abspath))
c.run("rm -rf {}".format(abspath))
c.run("python setup.py sdist bdist_wheel")
@task
def generate(c):
    """Embed README.rst into <proj_name>/conf.py as a module-level doc string."""
readme_path = os.path.join(proj_dir, 'README.rst')
target_conf_path = os.path.join(proj_dir, proj_name, 'conf.py')
with open(readme_path, 'r') as read_f, open(target_conf_path, 'w') as write_f:
write_f.write('''doc = """
{}
"""\n'''.format(read_f.read()))
| 25.08547
| 90
| 0.627939
|
25ab7661bb6756a972cd3dbfd362f11deced8301
| 12,644
|
py
|
Python
|
src/twisted/mail/protocols.py
|
mathieui/twisted
|
35546d2b50742a32edba54719ce3e752dc50dd2a
|
[
"MIT",
"Unlicense"
] | 9,953
|
2019-04-03T23:41:04.000Z
|
2022-03-31T11:54:44.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/twisted/mail/protocols.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 44
|
2019-05-27T10:59:29.000Z
|
2022-03-31T14:14:29.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/twisted/mail/protocols.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 2,803
|
2019-04-06T13:15:33.000Z
|
2022-03-31T07:42:01.000Z
|
# -*- test-case-name: twisted.mail.test.test_mail -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Mail protocol support.
"""
from __future__ import absolute_import, division
from twisted.mail import pop3
from twisted.mail import smtp
from twisted.internet import protocol
from twisted.internet import defer
from twisted.copyright import longversion
from twisted.python import log
from twisted.cred.credentials import CramMD5Credentials, UsernamePassword
from twisted.cred.error import UnauthorizedLogin
from twisted.mail import relay
from zope.interface import implementer
@implementer(smtp.IMessageDelivery)
class DomainDeliveryBase:
"""
A base class for message delivery using the domains of a mail service.
@ivar service: See L{__init__}
@ivar user: See L{__init__}
@ivar host: See L{__init__}
@type protocolName: L{bytes}
@ivar protocolName: The protocol being used to deliver the mail.
Sub-classes should set this appropriately.
"""
service = None
protocolName = None
def __init__(self, service, user, host=smtp.DNSNAME):
"""
@type service: L{MailService}
@param service: A mail service.
@type user: L{bytes} or L{None}
@param user: The authenticated SMTP user.
@type host: L{bytes}
@param host: The hostname.
"""
self.service = service
self.user = user
self.host = host
def receivedHeader(self, helo, origin, recipients):
"""
Generate a received header string for a message.
@type helo: 2-L{tuple} of (L{bytes}, L{bytes})
@param helo: The client's identity as sent in the HELO command and its
IP address.
@type origin: L{Address}
@param origin: The origination address of the message.
@type recipients: L{list} of L{User}
@param recipients: The destination addresses for the message.
@rtype: L{bytes}
@return: A received header string.
"""
authStr = heloStr = b""
if self.user:
authStr = b" auth=" + self.user.encode('xtext')
if helo[0]:
heloStr = b" helo=" + helo[0]
fromUser = (b"from " + helo[0] + b" ([" + helo[1] + b"]" +
heloStr + authStr)
by = (b"by " + self.host + b" with " + self.protocolName +
b" (" + longversion.encode("ascii") + b")")
forUser = (b"for <" + b' '.join(map(bytes, recipients)) + b"> " +
smtp.rfc822date())
return (b"Received: " + fromUser + b"\n\t" + by +
b"\n\t" + forUser)
def validateTo(self, user):
"""
Validate the address for which a message is destined.
@type user: L{User}
@param user: The destination address.
@rtype: L{Deferred <defer.Deferred>} which successfully fires with
no-argument callable which returns L{IMessage <smtp.IMessage>}
provider.
@return: A deferred which successfully fires with a no-argument
callable which returns a message receiver for the destination.
@raise SMTPBadRcpt: When messages cannot be accepted for the
destination address.
"""
# XXX - Yick. This needs cleaning up.
if self.user and self.service.queue:
d = self.service.domains.get(user.dest.domain, None)
if d is None:
d = relay.DomainQueuer(self.service, True)
else:
d = self.service.domains[user.dest.domain]
return defer.maybeDeferred(d.exists, user)
def validateFrom(self, helo, origin):
"""
Validate the address from which a message originates.
@type helo: 2-L{tuple} of (L{bytes}, L{bytes})
@param helo: The client's identity as sent in the HELO command and its
IP address.
@type origin: L{Address}
@param origin: The origination address of the message.
@rtype: L{Address}
@return: The origination address.
@raise SMTPBadSender: When messages cannot be accepted from the
origination address.
"""
if not helo:
raise smtp.SMTPBadSender(origin, 503,
"Who are you? Say HELO first.")
if origin.local != b'' and origin.domain == b'':
raise smtp.SMTPBadSender(origin, 501,
"Sender address must contain domain.")
return origin
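# Behavioural sketch of validateFrom above (illustrative):
#   no HELO seen yet                            -> SMTPBadSender(origin, 503, "Who are you? Say HELO first.")
#   origin has a local part but an empty domain -> SMTPBadSender(origin, 501, "Sender address must contain domain.")
#   otherwise                                   -> the origin Address is returned unchanged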
class SMTPDomainDelivery(DomainDeliveryBase):
"""
A domain delivery base class for use in an SMTP server.
"""
protocolName = b'smtp'
class ESMTPDomainDelivery(DomainDeliveryBase):
"""
A domain delivery base class for use in an ESMTP server.
"""
protocolName = b'esmtp'
class SMTPFactory(smtp.SMTPFactory):
"""
An SMTP server protocol factory.
@ivar service: See L{__init__}
@ivar portal: See L{__init__}
@type protocol: no-argument callable which returns a L{Protocol
<protocol.Protocol>} subclass
@ivar protocol: A callable which creates a protocol. The default value is
L{SMTP}.
"""
protocol = smtp.SMTP
portal = None
def __init__(self, service, portal = None):
"""
@type service: L{MailService}
@param service: An email service.
@type portal: L{Portal <twisted.cred.portal.Portal>} or
L{None}
@param portal: A portal to use for authentication.
"""
smtp.SMTPFactory.__init__(self)
self.service = service
self.portal = portal
def buildProtocol(self, addr):
"""
Create an instance of an SMTP server protocol.
@type addr: L{IAddress <twisted.internet.interfaces.IAddress>} provider
@param addr: The address of the SMTP client.
@rtype: L{SMTP}
@return: An SMTP protocol.
"""
log.msg('Connection from %s' % (addr,))
p = smtp.SMTPFactory.buildProtocol(self, addr)
p.service = self.service
p.portal = self.portal
return p
class ESMTPFactory(SMTPFactory):
"""
An ESMTP server protocol factory.
@type protocol: no-argument callable which returns a L{Protocol
<protocol.Protocol>} subclass
@ivar protocol: A callable which creates a protocol. The default value is
L{ESMTP}.
@type context: L{IOpenSSLContextFactory
<twisted.internet.interfaces.IOpenSSLContextFactory>} or L{None}
@ivar context: A factory to generate contexts to be used in negotiating
encrypted communication.
@type challengers: L{dict} mapping L{bytes} to no-argument callable which
returns L{ICredentials <twisted.cred.credentials.ICredentials>}
subclass provider.
@ivar challengers: A mapping of acceptable authorization mechanism to
callable which creates credentials to use for authentication.
"""
protocol = smtp.ESMTP
context = None
def __init__(self, *args):
"""
@param args: Arguments for L{SMTPFactory.__init__}
@see: L{SMTPFactory.__init__}
"""
SMTPFactory.__init__(self, *args)
self.challengers = {
b'CRAM-MD5': CramMD5Credentials
}
def buildProtocol(self, addr):
"""
Create an instance of an ESMTP server protocol.
@type addr: L{IAddress <twisted.internet.interfaces.IAddress>} provider
@param addr: The address of the ESMTP client.
@rtype: L{ESMTP}
@return: An ESMTP protocol.
"""
p = SMTPFactory.buildProtocol(self, addr)
p.challengers = self.challengers
p.ctx = self.context
return p
class VirtualPOP3(pop3.POP3):
"""
A virtual hosting POP3 server.
@type service: L{MailService}
@ivar service: The email service that created this server. This must be
set by the service.
@type domainSpecifier: L{bytes}
@ivar domainSpecifier: The character to use to split an email address into
local-part and domain. The default is '@'.
"""
service = None
domainSpecifier = b'@' # Gaagh! I hate POP3. No standardized way
# to indicate user@host. '@' doesn't work
# with NS, e.g.
def authenticateUserAPOP(self, user, digest):
"""
Perform APOP authentication.
Override the default lookup scheme to allow virtual domains.
@type user: L{bytes}
@param user: The name of the user attempting to log in.
@type digest: L{bytes}
@param digest: The challenge response.
@rtype: L{Deferred} which successfully results in 3-L{tuple} of
(L{IMailbox <pop3.IMailbox>}, L{IMailbox <pop3.IMailbox>}
provider, no-argument callable)
@return: A deferred which fires when authentication is complete.
If successful, it returns an L{IMailbox <pop3.IMailbox>} interface,
a mailbox and a logout function. If authentication fails, the
deferred fails with an L{UnauthorizedLogin
<twisted.cred.error.UnauthorizedLogin>} error.
"""
user, domain = self.lookupDomain(user)
try:
portal = self.service.lookupPortal(domain)
except KeyError:
return defer.fail(UnauthorizedLogin())
else:
return portal.login(
pop3.APOPCredentials(self.magic, user, digest),
None,
pop3.IMailbox
)
def authenticateUserPASS(self, user, password):
"""
Perform authentication for a username/password login.
Override the default lookup scheme to allow virtual domains.
@type user: L{bytes}
@param user: The name of the user attempting to log in.
@type password: L{bytes}
@param password: The password to authenticate with.
@rtype: L{Deferred} which successfully results in 3-L{tuple} of
(L{IMailbox <pop3.IMailbox>}, L{IMailbox <pop3.IMailbox>}
provider, no-argument callable)
@return: A deferred which fires when authentication is complete.
If successful, it returns an L{IMailbox <pop3.IMailbox>} interface,
a mailbox and a logout function. If authentication fails, the
deferred fails with an L{UnauthorizedLogin
<twisted.cred.error.UnauthorizedLogin>} error.
"""
user, domain = self.lookupDomain(user)
try:
portal = self.service.lookupPortal(domain)
except KeyError:
return defer.fail(UnauthorizedLogin())
else:
return portal.login(
UsernamePassword(user, password),
None,
pop3.IMailbox
)
def lookupDomain(self, user):
"""
Check whether a domain is among the virtual domains supported by the
mail service.
@type user: L{bytes}
@param user: An email address.
@rtype: 2-L{tuple} of (L{bytes}, L{bytes})
@return: The local part and the domain part of the email address if the
domain is supported.
@raise POP3Error: When the domain is not supported by the mail service.
"""
try:
user, domain = user.split(self.domainSpecifier, 1)
except ValueError:
domain = b''
if domain not in self.service.domains:
raise pop3.POP3Error(
"no such domain {}".format(domain.decode("utf-8")))
return user, domain
class POP3Factory(protocol.ServerFactory):
"""
A POP3 server protocol factory.
@ivar service: See L{__init__}
@type protocol: no-argument callable which returns a L{Protocol
<protocol.Protocol>} subclass
@ivar protocol: A callable which creates a protocol. The default value is
L{VirtualPOP3}.
"""
protocol = VirtualPOP3
service = None
def __init__(self, service):
"""
@type service: L{MailService}
@param service: An email service.
"""
self.service = service
def buildProtocol(self, addr):
"""
Create an instance of a POP3 server protocol.
@type addr: L{IAddress <twisted.internet.interfaces.IAddress>} provider
@param addr: The address of the POP3 client.
@rtype: L{POP3}
@return: A POP3 protocol.
"""
p = protocol.ServerFactory.buildProtocol(self, addr)
p.service = self.service
return p
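# Hedged usage sketch (editorial addition, not part of the original module): one
# plausible way to serve these factories, assuming `service` is a MailService
# instance (defined elsewhere in this module) already configured with domains
# and portals; the port numbers are illustrative only.
if __name__ == '__main__':
    from twisted.internet import reactor
    service = MailService()  # assumed to be populated with domains/portals
    reactor.listenTCP(8025, SMTPFactory(service))  # SMTP delivery
    reactor.listenTCP(8110, POP3Factory(service))  # POP3 retrieval
    reactor.run()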
| 31.219753
| 79
| 0.611594
|
f7837d743674d88e46151daeb73135fb208d145f
| 2,795
|
py
|
Python
|
gtfs/schedule.py
|
eoghanmurray/gtfs-sql
|
49c20747db88e7e218fb4fc83b61fbc0b37f552d
|
[
"MIT"
] | 2
|
2020-05-21T21:34:04.000Z
|
2021-05-22T15:12:51.000Z
|
gtfs/schedule.py
|
eoghanmurray/gtfs-sql
|
49c20747db88e7e218fb4fc83b61fbc0b37f552d
|
[
"MIT"
] | null | null | null |
gtfs/schedule.py
|
eoghanmurray/gtfs-sql
|
49c20747db88e7e218fb4fc83b61fbc0b37f552d
|
[
"MIT"
] | null | null | null |
import sqlalchemy
import sqlalchemy.orm
from entity import *
class Schedule:
"""Represents a full GTFS data set."""
def __init__(self, db_connection):
self.db_connection = db_connection
self.db_filename = None
if '://' not in db_connection:
self.db_connection = 'sqlite:///%s' % self.db_connection
if self.db_connection.startswith('sqlite'):
self.db_filename = self.db_connection
self.engine = sqlalchemy.create_engine(self.db_connection, echo=False)
Session = sqlalchemy.orm.sessionmaker(bind=self.engine)
self.session = Session()
@property
def agencies(self):
return self.session.query(Agency).all()
@property
def agencies_by_id(self):
return dict(zip([x.agency_id for x in self.agencies], self.agencies))
@property
def stops(self):
return self.session.query(Stop).all()
@property
def stops_by_id(self):
return dict(zip([x.stop_id for x in self.stops], self.stops))
@property
def routes(self):
return self.session.query(Route).all()
@property
def routes_by_id(self):
return dict(zip([x.route_id for x in self.routes], self.routes))
@property
def services(self):
return self.session.query(Service).all()
@property
def services_by_id(self):
return dict(zip([x.service_id for x in self.services], self.services))
@property
def service_exceptions(self):
return self.session.query(ServiceException).all()
@property
def trips(self):
return self.session.query(Trip).all()
@property
def trips_by_id(self):
return dict(zip([x.trip_id for x in self.trips], self.trips))
@property
def stop_times(self):
return self.session.query(StopTime).all()
@property
def fares(self):
return self.session.query(Fare).all()
@property
def fares_by_id(self):
return dict(zip([x.fare_id for x in self.fares], self.fares))
@property
def fare_rules(self):
return self.session.query(FareRule).all()
@property
def shape_points(self):
return self.session.query(ShapePoint).all()
@property
def shape_points_by_id(self):
return dict(zip([x.shape_id for x in self.shape_points], self.shape_points))
@property
def frequencies(self):
return self.session.query(Frequency).all()
@property
def transfers(self):
return self.session.query(Transfer).all()
@property
def feed_info(self):
return self.session.query(FeedInfo).all()
def create_tables(self, metadata):
metadata.create_all(self.engine)
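# Hedged usage sketch (editorial addition): open an already-populated GTFS
# SQLite database through this schema and list its routes. The filename is an
# assumption; any SQLAlchemy connection string is accepted as well.
if __name__ == '__main__':
    schedule = Schedule('gtfs.db')  # expanded internally to 'sqlite:///gtfs.db'
    for route in schedule.routes:
        print(route.route_id)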
| 27.673267
| 84
| 0.627191
|
e8ec74b5d73d43f1c297e19ef7d7f28cbc67235f
| 2,781
|
py
|
Python
|
tests/misc/non_compliant.py
|
kaffir/circuitpython
|
0930f7a7972dc006c079102e292babb1ae02aa1a
|
[
"MIT"
] | 5
|
2017-07-17T23:28:09.000Z
|
2020-06-16T17:28:47.000Z
|
tests/misc/non_compliant.py
|
kaffir/circuitpython
|
0930f7a7972dc006c079102e292babb1ae02aa1a
|
[
"MIT"
] | null | null | null |
tests/misc/non_compliant.py
|
kaffir/circuitpython
|
0930f7a7972dc006c079102e292babb1ae02aa1a
|
[
"MIT"
] | null | null | null |
# tests for things that are not implemented, or have non-compliant behaviour
import array
import ustruct
# when super can't find self
try:
exec('def f(): super()')
except SyntaxError:
print('SyntaxError')
# store to exception attribute is not allowed
try:
ValueError().x = 0
except AttributeError:
print('AttributeError')
# array deletion not implemented
try:
a = array.array('b', (1, 2, 3))
del a[1]
except TypeError:
print('TypeError')
# slice with step!=1 not implemented
try:
a = array.array('b', (1, 2, 3))
print(a[3:2:2])
except NotImplementedError:
print('NotImplementedError')
# containment, looking for integer not implemented
try:
print(1 in array.array('B', b'12'))
except NotImplementedError:
print('NotImplementedError')
# should raise type error
try:
print(set('12') >= '1')
except TypeError:
print('TypeError')
# should raise type error
try:
print(set('12') <= '123')
except TypeError:
print('TypeError')
# uPy raises TypeError, should be ValueError
try:
'%c' % b'\x01\x02'
except (TypeError, ValueError):
print('TypeError, ValueError')
# attributes/subscr not implemented
try:
print('{a[0]}'.format(a=[1, 2]))
except NotImplementedError:
print('NotImplementedError')
# str(...) with keywords not implemented
try:
str(b'abc', encoding='utf8')
except NotImplementedError:
print('NotImplementedError')
# str.rsplit(None, n) not implemented
try:
'a a a'.rsplit(None, 1)
except NotImplementedError:
print('NotImplementedError')
# str.endswith(s, start) not implemented
try:
'abc'.endswith('c', 1)
except NotImplementedError:
print('NotImplementedError')
# str subscr with step!=1 not implemented
try:
print('abc'[1:2:3])
except NotImplementedError:
print('NotImplementedError')
# bytes(...) with keywords not implemented
try:
bytes('abc', encoding='utf8')
except NotImplementedError:
print('NotImplementedError')
# bytes subscr with step!=1 not implemented
try:
b'123'[0:3:2]
except NotImplementedError:
print('NotImplementedError')
# tuple load with step!=1 not implemented
try:
()[2:3:4]
except NotImplementedError:
print('NotImplementedError')
# list store with step!=1 not implemented
try:
[][2:3:4] = []
except NotImplementedError:
print('NotImplementedError')
# list delete with step!=1 not implemented
try:
del [][2:3:4]
except NotImplementedError:
print('NotImplementedError')
# struct pack with too many args, not checked by uPy
print(ustruct.pack('bb', 1, 2, 3))
# struct pack with too few args, not checked by uPy
print(ustruct.pack('bb', 1))
# array slice assignment with unsupported RHS
try:
bytearray(4)[0:1] = [1, 2]
except NotImplementedError:
print('NotImplementedError')
| 21.897638
| 76
| 0.696152
|
d900d5ef5e03534d642cb87074e02652ce3be0f2
| 7,089
|
py
|
Python
|
drf_firebase_auth/authentication.py
|
Arka-cell/drf-firebase-auth
|
7a76b0d2300964962ff0dfa3de0f513a1bc6a9ba
|
[
"MIT"
] | null | null | null |
drf_firebase_auth/authentication.py
|
Arka-cell/drf-firebase-auth
|
7a76b0d2300964962ff0dfa3de0f513a1bc6a9ba
|
[
"MIT"
] | null | null | null |
drf_firebase_auth/authentication.py
|
Arka-cell/drf-firebase-auth
|
7a76b0d2300964962ff0dfa3de0f513a1bc6a9ba
|
[
"MIT"
] | 4
|
2021-07-23T10:14:36.000Z
|
2021-12-06T14:49:51.000Z
|
# -*- coding: utf-8 -*-
"""
Authentication backend for handling firebase user.idToken from incoming
Authorization header, verifying, and locally authenticating
"""
from typing import Tuple, Dict
import logging
import firebase_admin
from firebase_admin import auth as firebase_auth
from django.utils.encoding import smart_text
from django.utils import timezone
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from rest_framework import (
authentication,
exceptions
)
from .settings import api_settings
from .models import (
FirebaseUser,
FirebaseUserProvider
)
from .utils import get_firebase_user_uid, get_firebase_user_identifier
from . import __title__
log = logging.getLogger(__title__)
User = get_user_model()
firebase_credentials = firebase_admin.credentials.Certificate(
api_settings.FIREBASE_SERVICE_ACCOUNT_KEY
)
firebase = firebase_admin.initialize_app(
firebase_credentials, api_settings.STORAGE_BUCKET
)
class FirebaseAuthentication(authentication.TokenAuthentication):
"""
Token based authentication using firebase.
"""
keyword = api_settings.FIREBASE_AUTH_HEADER_PREFIX
def authenticate_credentials(
self,
token: str
) -> Tuple[AnonymousUser, Dict]:
try:
decoded_token = self._decode_token(token)
firebase_user = self._authenticate_token(decoded_token)
local_user = self._get_or_create_local_user(firebase_user)
self._create_local_firebase_user(local_user, firebase_user)
return (local_user, decoded_token)
except Exception as e:
raise exceptions.AuthenticationFailed(e)
def _decode_token(self, token: str) -> Dict:
"""
Attempt to verify JWT from Authorization header with Firebase and
return the decoded token
"""
try:
decoded_token = firebase_auth.verify_id_token(
token,
check_revoked=api_settings.FIREBASE_CHECK_JWT_REVOKED
)
log.info(f'_decode_token - decoded_token: {decoded_token}')
return decoded_token
except Exception as e:
log.error(f'_decode_token - Exception: {e}')
raise Exception(e)
def _authenticate_token(
self,
decoded_token: Dict
) -> firebase_auth.UserRecord:
""" Returns firebase user if token is authenticated """
try:
uid = decoded_token.get('uid')
log.info(f'_authenticate_token - uid: {uid}')
firebase_user = firebase_auth.get_user(uid)
log.info(f'_authenticate_token - firebase_user: {firebase_user}')
if api_settings.FIREBASE_AUTH_EMAIL_VERIFICATION:
if not firebase_user.email_verified:
raise Exception(
'Email address of this user has not been verified.'
)
return firebase_user
except Exception as e:
log.error(f'_authenticate_token - Exception: {e}')
raise Exception(e)
def _get_or_create_local_user(
self,
firebase_user: firebase_auth.UserRecord
) -> User:
"""
Attempts to return or create a local User from Firebase user data
"""
uid = get_firebase_user_uid(firebase_user)
identifier = get_firebase_user_identifier(firebase_user)
log.info(f'_get_or_create_local_user - email: {identifier}')
user = None
try:
user = User.objects.get(username=uid)
log.info(
f'_get_or_create_local_user - user.is_active: {user.is_active}'
)
if not user.is_active:
raise Exception(
'User account is not currently active.'
)
user.last_login = timezone.now()
user.save()
except User.DoesNotExist as e:
log.error(
f'_get_or_create_local_user - User.DoesNotExist: {identifier}'
)
if not api_settings.FIREBASE_CREATE_LOCAL_USER:
raise Exception('User is not registered to the application.')
username = \
api_settings.FIREBASE_USERNAME_MAPPING_FUNC(firebase_user)
log.info(
f'_get_or_create_local_user - username: {uid}'
)
try:
user = User.objects.create_user(
username=uid,
email=identifier
)
user.last_login = timezone.now()
if (
api_settings.FIREBASE_ATTEMPT_CREATE_WITH_DISPLAY_NAME
and firebase_user.display_name is not None
):
display_name = firebase_user.display_name.split(' ')
if len(display_name) == 2:
user.first_name = display_name[0]
user.last_name = display_name[1]
user.save()
except Exception as e:
raise Exception(e)
return user
def _create_local_firebase_user(
self,
user: User,
firebase_user: firebase_auth.UserRecord
):
""" Create a local FireBase model if one does not already exist """
# pylint: disable=no-member
local_firebase_user = FirebaseUser.objects.filter(
user=user
).first()
if not local_firebase_user:
new_firebase_user = FirebaseUser(
uid=firebase_user.uid,
user=user
)
new_firebase_user.save()
local_firebase_user = new_firebase_user
if local_firebase_user.uid != firebase_user.uid:
local_firebase_user.uid = firebase_user.uid
local_firebase_user.save()
# store FirebaseUserProvider data
for provider in firebase_user.provider_data:
local_provider = FirebaseUserProvider.objects.filter(
provider_id=provider.provider_id,
firebase_user=local_firebase_user
).first()
if not local_provider:
new_local_provider = FirebaseUserProvider.objects.create(
provider_id=provider.provider_id,
uid=provider.uid,
firebase_user=local_firebase_user,
)
new_local_provider.save()
# catch locally stored providers no longer associated at Firebase
local_providers = FirebaseUserProvider.objects.filter(
firebase_user=local_firebase_user
)
if len(local_providers) != len(firebase_user.provider_data):
current_providers = \
[x.provider_id for x in firebase_user.provider_data]
for provider in local_providers:
if provider.provider_id not in current_providers:
FirebaseUserProvider.objects.filter(
id=provider.id
).delete()
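# Hedged configuration sketch (editorial addition; belongs in a Django
# settings.py, not in this module): one common way to enable this backend for
# all DRF views. The service-account key must still be supplied through this
# package's own settings (FIREBASE_SERVICE_ACCOUNT_KEY, used above), which is
# omitted here.
#
#   REST_FRAMEWORK = {
#       'DEFAULT_AUTHENTICATION_CLASSES': [
#           'drf_firebase_auth.authentication.FirebaseAuthentication',
#       ],
#   }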
| 36.353846
| 79
| 0.613345
|
6967f9be59604400f1306cfebe81b856c2e207cf
| 1,603
|
py
|
Python
|
aldryn_redirects/middleware.py
|
what-digital/aldryn-redirects
|
a0320ee87cfa0e1b6709cda4aa88dd92542c225a
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_redirects/middleware.py
|
what-digital/aldryn-redirects
|
a0320ee87cfa0e1b6709cda4aa88dd92542c225a
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_redirects/middleware.py
|
what-digital/aldryn-redirects
|
a0320ee87cfa0e1b6709cda4aa88dd92542c225a
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from django import http
from django.conf import settings
from django.contrib.sites.models import Site
from django.db.models import Q
from django.utils.deprecation import MiddlewareMixin
from .models import Redirect, StaticRedirect
class RedirectFallbackMiddleware(MiddlewareMixin):
def process_request(self, request):
static_redirect = StaticRedirect.objects.get_for_request(request)
if static_redirect:
full_domain = '{}://{}'.format(request.scheme, Site.objects.get(id=settings.SITE_ID).domain)
return http.HttpResponsePermanentRedirect(static_redirect.get_outbound_url(full_domain))
path = request.path_info
path_with_queries = request.get_full_path()
queries = (
Q(old_path__iexact=path)
| Q(old_path__iexact=path_with_queries)
)
if settings.APPEND_SLASH and path.endswith('/'):
path_with_queries_no_slash = path[:-1] + path_with_queries[len(path):]
queries |= (
Q(old_path__iexact=path[:-1])
| Q(old_path__iexact=path_with_queries_no_slash)
)
try:
r = Redirect.objects.filter(
queries,
site__id__exact=settings.SITE_ID
).distinct().get()
except Redirect.DoesNotExist:
return
new_path = r.safe_translation_getter('new_path', any_language=True)
if new_path in (None, ''):
return http.HttpResponseGone()
return http.HttpResponsePermanentRedirect(new_path)
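# Hedged configuration sketch (editorial addition; belongs in a Django
# settings.py, not in this module): the middleware is typically appended after
# Django's built-in middleware so that explicit URL patterns are resolved
# before redirects are consulted.
#
#   MIDDLEWARE = [
#       # ... Django's default middleware ...
#       'aldryn_redirects.middleware.RedirectFallbackMiddleware',
#   ]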
| 34.847826
| 104
| 0.663132
|
0805da27a60c16649cffe181cee3192fd1d74606
| 336
|
py
|
Python
|
project/com/vo/BranchVO.py
|
sahilshah8141/ChequeClearanceSystem
|
f02efeb45b950be8bb34a35a399a358e7eeed03b
|
[
"Apache-2.0"
] | null | null | null |
project/com/vo/BranchVO.py
|
sahilshah8141/ChequeClearanceSystem
|
f02efeb45b950be8bb34a35a399a358e7eeed03b
|
[
"Apache-2.0"
] | null | null | null |
project/com/vo/BranchVO.py
|
sahilshah8141/ChequeClearanceSystem
|
f02efeb45b950be8bb34a35a399a358e7eeed03b
|
[
"Apache-2.0"
] | null | null | null |
from wtforms import *
class BranchVO:
branchId = IntegerField
branchName = StringField
branchIFSCCode = StringField
branchContact = StringField
branchEmail = StringField
branch_CityId = IntegerField
branch_AreaId = IntegerField
branch_BankId = IntegerField
branchActiveStatus = StringField
| 15.272727
| 36
| 0.732143
|
2bc2f07e189602f6cd884ab2ac28d62765dcbcf4
| 42,877
|
py
|
Python
|
src/sage/schemes/elliptic_curves/sha_tate.py
|
hsm207/sage
|
020bd59ec28717bfab9af44d2231c53da1ff99f1
|
[
"BSL-1.0"
] | 4
|
2020-07-17T04:49:44.000Z
|
2020-07-29T06:33:51.000Z
|
src/sage/schemes/elliptic_curves/sha_tate.py
|
Ivo-Maffei/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | 1
|
2020-04-18T16:30:43.000Z
|
2020-04-18T16:30:43.000Z
|
src/sage/schemes/elliptic_curves/sha_tate.py
|
dimpase/sage
|
468f23815ade42a2192b0a9cd378de8fdc594dcd
|
[
"BSL-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
r"""
Tate-Shafarevich group
If `E` is an elliptic curve over a global field `K`, the Tate-Shafarevich
group is the subgroup of elements in `H^1(K,E)` which map to zero under every
global-to-local restriction map `H^1(K,E) \to H^1(K_v,E)`, one for each place
`v` of `K`.
The group is usually denoted by the Russian letter Sha (Ш), in this document
it will be denoted by `Sha`.
`Sha` is known to be an abelian torsion group. It is conjectured that the
Tate-Shafarevich group is finite for any elliptic curve over a global field.
But it is not known in general.
A theorem of Kolyvagin and Gross-Zagier using Heegner points shows that if the
L-series of an elliptic curve `E/\QQ` does not vanish at 1 or has a simple
zero there, then `Sha` is finite.
A theorem of Kato, together with theorems from Iwasawa theory, allows for
certain primes `p` to show that the `p`-primary part of `Sha` is finite and
gives an effective upper bound for it.
The (`p`-adic) conjecture of Birch and Swinnerton-Dyer predicts the order of
`Sha` from the leading term of the (`p`-adic) L-series of the elliptic curve.
Sage can compute a few things about `Sha`. The commands ``an``,
``an_numerical`` and ``an_padic`` compute the conjectural order of `Sha` as a
real or `p`-adic number. With ``p_primary_bound`` one can find an upper bound
of the size of the `p`-primary part of `Sha`. Finally, if the analytic rank is
at most 1, then ``bound_kato`` and ``bound_kolyvagin`` find all primes for
which the theorems of Kato and Kolyvagin respectively do not prove the
triviality of the `p`-primary part of `Sha`.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: S = E.sha()
sage: S.bound_kato()
[2]
sage: S.bound_kolyvagin()
([2, 5], 1)
sage: S.an_padic(7,3)
1 + O(7^5)
sage: S.an()
1
sage: S.an_numerical()
1.00000000000000
sage: E = EllipticCurve('389a')
sage: S = E.sha(); S
Tate-Shafarevich group for the Elliptic Curve defined by y^2 + y = x^3 + x^2 - 2*x over Rational Field
sage: S.an_numerical()
1.00000000000000
sage: S.p_primary_bound(5)
0
sage: S.an_padic(5)
1 + O(5)
sage: S.an_padic(5,prec=4) # long time (2s on sage.math, 2011)
1 + O(5^3)
AUTHORS:
- William Stein (2007) -- initial version
- Chris Wuthrich (April 2009) -- reformat docstrings
- Aly Deines, Chris Wuthrich, Jeaninne Van Order (2016-03): Added
functionality that tests the Skinner-Urban condition.
"""
# ****************************************************************************
# Copyright (C) 2007 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.structure.sage_object import SageObject
from sage.rings.all import (
Integer,
RealField,
RationalField,
RIF,
ZZ)
from sage.functions.log import log
from math import sqrt
from sage.misc.verbose import verbose
import sage.arith.all as arith
from sage.rings.padics.factory import Qp
from sage.modules.free_module_element import vector
factor = arith.factor
valuation = arith.valuation
Q = RationalField()
class Sha(SageObject):
r"""
The Tate-Shafarevich group associated to an elliptic curve.
If `E` is an elliptic curve over a global field `K`, the Tate-Shafarevich
group is the subgroup of elements in `H^1(K,E)` which map to zero under
every global-to-local restriction map `H^1(K,E) \to H^1(K_v,E)`, one for
each place `v` of `K`.
EXAMPLES::
sage: E = EllipticCurve('571a1')
sage: E._set_gens([]) # curve has rank 0, but non-trivial Sha[2]
sage: S = E.sha()
sage: S.bound_kato()
[2]
sage: S.bound_kolyvagin()
([2], 1)
sage: S.an_padic(7,3)
4 + O(7^5)
sage: S.an()
4
sage: S.an_numerical()
4.00000000000000
sage: E = EllipticCurve('389a')
sage: S = E.sha(); S
Tate-Shafarevich group for the Elliptic Curve defined by y^2 + y = x^3 + x^2 - 2*x over Rational Field
sage: S.an_numerical()
1.00000000000000
sage: S.p_primary_bound(5) # long time
0
sage: S.an_padic(5) # long time
1 + O(5)
sage: S.an_padic(5,prec=4) # very long time
1 + O(5^3)
"""
def __init__(self, E):
r"""
The Tate-Shafarevich group associated to an elliptic curve.
INPUT:
- E -- an elliptic curve over `\QQ`
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: S = E.sha()
sage: S
Tate-Shafarevich group for the Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field
sage: S == loads(dumps(S))
True
"""
self.E = E
self.Emin = E.minimal_model() if not E.is_minimal() else E
def __eq__(self, other):
r"""
Compare two Tate-Shafarevich groups by simply comparing the
elliptic curves.
EXAMPLES::
sage: E = EllipticCurve('37a1')
sage: S = E.sha()
sage: S == S
True
"""
if not isinstance(other, Sha):
return False
return self.E == other.E
def __ne__(self, other):
"""
Check whether ``self`` is not equal to ``other``.
EXAMPLES::
sage: E = EllipticCurve('37a1')
sage: S = E.sha()
sage: S != S
False
"""
return not (self == other)
def __repr__(self):
r"""
String representation of the Tate-Shafarevich group.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: S = E.sha()
sage: S.__repr__()
'Tate-Shafarevich group for the Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field'
"""
return "Tate-Shafarevich group for the " + repr(self.E)
########################################################################
# Functions related to the BSD conjecture.
########################################################################
def an_numerical(self, prec=None,
use_database=True, proof=None):
r"""
Return the numerical analytic order of `Sha`, which is
a floating point number in all cases.
INPUT:
- ``prec`` - integer (default: 53) bits precision -- used
for the L-series computation, period, regulator, etc.
- ``use_database`` - whether the rank and generators should
be looked up in the database if possible. Default is ``True``
- ``proof`` - bool or ``None`` (default: ``None``, see proof.[tab] or
sage.structure.proof) proof option passed
onto regulator and rank computation.
.. note::
See also the :meth:`an` command, which will return a
provably correct integer when the rank is 0 or 1.
.. WARNING::
If the curve's generators are not known, computing
them may be very time-consuming. Also, computation of the
L-series derivative will be time-consuming for large rank and
large conductor, and the computation time for this may
increase substantially at greater precision. However, use of
very low precision less than about 10 can cause the underlying
PARI library functions to fail.
EXAMPLES::
sage: EllipticCurve('11a').sha().an_numerical()
1.00000000000000
sage: EllipticCurve('37a').sha().an_numerical()
1.00000000000000
sage: EllipticCurve('389a').sha().an_numerical()
1.00000000000000
sage: EllipticCurve('66b3').sha().an_numerical()
4.00000000000000
sage: EllipticCurve('5077a').sha().an_numerical()
1.00000000000000
A rank 4 curve::
sage: EllipticCurve([1, -1, 0, -79, 289]).sha().an_numerical() # long time (3s on sage.math, 2011)
1.00000000000000
A rank 5 curve::
sage: EllipticCurve([0, 0, 1, -79, 342]).sha().an_numerical(prec=10, proof=False) # long time (22s on sage.math, 2011)
1.0
See :trac:`1115`::
sage: sha = EllipticCurve('37a1').sha()
sage: [sha.an_numerical(prec) for prec in range(40,100,10)] # long time (3s on sage.math, 2013)
[1.0000000000,
1.0000000000000,
1.0000000000000000,
1.0000000000000000000,
1.0000000000000000000000,
1.0000000000000000000000000]
"""
if prec is None:
prec = RealField().precision()
RR = RealField(prec)
prec2 = prec + 2
RR2 = RealField(prec2)
try:
an = self.__an_numerical
if an.parent().precision() >= prec:
return RR(an)
else: # cached precision too low
pass
except AttributeError:
pass
# it's critical to switch to the minimal model.
E = self.Emin
r = Integer(E.rank(use_database=use_database, proof=proof))
L = E.lseries().dokchitser(prec=prec2)
Lr = RR2(L.derivative(1, r)) # L.derivative() returns a Complex
Om = RR2(E.period_lattice().omega(prec2))
Reg = E.regulator(use_database=use_database, proof=proof, precision=prec2)
T = E.torsion_order()
cp = E.tamagawa_product()
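        # Editorial note: this is the conjectural BSD leading-term formula,
        #   #Sha ~ L^(r)(E,1) * #E(Q)_tors^2 / (r! * Omega_E * prod_v c_v * Reg),
        # evaluated numerically at the working precision.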
Sha = RR((Lr * T * T) / (r.factorial() * Om * cp * Reg))
self.__an_numerical = Sha
return Sha
def an(self, use_database=False, descent_second_limit=12):
r"""
Returns the Birch and Swinnerton-Dyer conjectural order of `Sha`
as a provably correct integer, unless the analytic rank is > 1,
in which case this function returns a numerical value.
INPUT:
- ``use_database`` -- bool (default: ``False``); if ``True``, try
to use any databases installed to lookup the analytic order of
`Sha`, if possible. The order of `Sha` is computed if it cannot
be looked up.
- ``descent_second_limit`` -- int (default: 12); limit to use on
point searching for the quartic twist in the hard case
This result is proved correct if the order of vanishing is 0
and the Manin constant is <= 2.
If the optional parameter ``use_database`` is ``True`` (default:
``False``), this function returns the analytic order of `Sha` as
listed in Cremona's tables, if this curve appears in Cremona's
tables.
NOTE:
If you come across the following error::
sage: E = EllipticCurve([0, 0, 1, -34874, -2506691])
sage: E.sha().an()
Traceback (most recent call last):
...
RuntimeError: Unable to compute the rank, hence generators, with certainty (lower bound=0, generators found=[]). This could be because Sha(E/Q)[2] is nontrivial.
Try increasing descent_second_limit then trying this command again.
You can increase the ``descent_second_limit`` (in the above example,
set to the default, 12) option to try again::
sage: E.sha().an(descent_second_limit=16) # long time (2s on sage.math, 2011)
1
EXAMPLES::
sage: E = EllipticCurve([0, -1, 1, -10, -20]) # 11A = X_0(11)
sage: E.sha().an()
1
sage: E = EllipticCurve([0, -1, 1, 0, 0]) # X_1(11)
sage: E.sha().an()
1
sage: EllipticCurve('14a4').sha().an()
1
sage: EllipticCurve('14a4').sha().an(use_database=True) # will be faster if you have large Cremona database installed
1
The smallest conductor curve with nontrivial `Sha`::
sage: E = EllipticCurve([1,1,1,-352,-2689]) # 66b3
sage: E.sha().an()
4
The four optimal quotients with nontrivial `Sha` and conductor <= 1000::
sage: E = EllipticCurve([0, -1, 1, -929, -10595]) # 571A
sage: E.sha().an()
4
sage: E = EllipticCurve([1, 1, 0, -1154, -15345]) # 681B
sage: E.sha().an()
9
sage: E = EllipticCurve([0, -1, 0, -900, -10098]) # 960D
sage: E.sha().an()
4
sage: E = EllipticCurve([0, 1, 0, -20, -42]) # 960N
sage: E.sha().an()
4
The smallest conductor curve of rank > 1::
sage: E = EllipticCurve([0, 1, 1, -2, 0]) # 389A (rank 2)
sage: E.sha().an()
1.00000000000000
The following are examples that require computation of the Mordell-
Weil group and regulator::
sage: E = EllipticCurve([0, 0, 1, -1, 0]) # 37A (rank 1)
sage: E.sha().an()
1
sage: E = EllipticCurve("1610f3")
sage: E.sha().an()
4
In this case the input curve is not minimal, and if this function did
not transform it to be minimal, it would give nonsense::
sage: E = EllipticCurve([0,-432*6^2])
sage: E.sha().an()
1
See :trac:`10096`: this used to give the wrong result 6.0000
before since the minimal model was not used::
sage: E = EllipticCurve([1215*1216,0]) # non-minimal model
sage: E.sha().an() # long time (2s on sage.math, 2011)
1.00000000000000
sage: E.minimal_model().sha().an() # long time (1s on sage.math, 2011)
1.00000000000000
"""
if hasattr(self, '__an'):
return self.__an
if use_database:
d = self.Emin.database_curve()
if hasattr(d, 'db_extra'):
self.__an = Integer(round(float(d.db_extra[4])))
return self.__an
# it's critical to switch to the minimal model.
E = self.Emin
eps = E.root_number()
if eps == 1:
L1_over_omega = E.lseries().L_ratio()
if L1_over_omega == 0: # order of vanishing is at least 2
return self.an_numerical(use_database=use_database)
T = E.torsion_subgroup().order()
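            # Editorial note: rank 0 case of the BSD formula (regulator = 1),
            #   #Sha = (L(E,1)/Omega_E) * #E(Q)_tors^2 / prod_v c_v.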
Sha = (L1_over_omega * T * T) / Q(E.tamagawa_product())
try:
Sha = Integer(Sha)
except ValueError:
raise RuntimeError("There is a bug in an, since the computed conjectural order of Sha is %s, which is not an integer." % Sha)
if not arith.is_square(Sha):
raise RuntimeError("There is a bug in an, since the computed conjectural order of Sha is %s, which is not a square." % Sha)
E.__an = Sha
self.__an = Sha
return Sha
else: # rank > 0 (Not provably correct)
L1, error_bound = E.lseries().deriv_at1(10*sqrt(E.conductor()) + 10)
if abs(L1) < error_bound:
s = self.an_numerical()
E.__an = s
self.__an = s
return s
regulator = E.regulator(use_database=use_database, descent_second_limit=descent_second_limit)
T = E.torsion_subgroup().order()
omega = E.period_lattice().omega()
Sha = ((L1 * T * T) / (E.tamagawa_product() * regulator * omega)).round()
try:
Sha = Integer(Sha)
except ValueError:
raise RuntimeError("There is a bug in an, since the computed conjectural order of Sha is %s, which is not an integer." % Sha)
if not arith.is_square(Sha):
raise RuntimeError("There is a bug in an, since the computed conjectural order of Sha is %s, which is not a square." % Sha)
E.__an = Sha
self.__an = Sha
return Sha
def an_padic(self, p, prec=0, use_twists=True):
r"""
Returns the conjectural order of `Sha(E/\QQ)`,
according to the `p`-adic analogue of the Birch
and Swinnerton-Dyer conjecture as formulated
in [MTT1986]_ and [BP1993]_.
INPUT:
- ``p`` - a prime > 3
- ``prec`` (optional) - the precision used in the computation of the
`p`-adic L-Series
- ``use_twists`` (default = ``True``) - If ``True`` the algorithm may
change to a quadratic twist with minimal conductor to do the modular
symbol computations rather than using the modular symbols of the
curve itself. If ``False`` it forces the computation using the
modular symbols of the curve itself.
OUTPUT: `p`-adic number - that conjecturally equals `\# Sha(E/\QQ)`.
If ``prec`` is set to zero (default) then the precision is set so that
at least the first `p`-adic digit of conjectural `\# Sha(E/\QQ)` is
determined.
EXAMPLES:
Good ordinary examples::
sage: EllipticCurve('11a1').sha().an_padic(5) # rank 0
1 + O(5^22)
sage: EllipticCurve('43a1').sha().an_padic(5) # rank 1
1 + O(5)
sage: EllipticCurve('389a1').sha().an_padic(5,4) # rank 2, long time (2s on sage.math, 2011)
1 + O(5^3)
sage: EllipticCurve('858k2').sha().an_padic(7) # rank 0, non trivial sha, long time (10s on sage.math, 2011)
7^2 + O(7^24)
sage: EllipticCurve('300b2').sha().an_padic(3) # 9 elements in sha, long time (2s on sage.math, 2011)
3^2 + O(3^24)
sage: EllipticCurve('300b2').sha().an_padic(7, prec=6) # long time
2 + 7 + O(7^8)
Exceptional cases::
sage: EllipticCurve('11a1').sha().an_padic(11) # rank 0
1 + O(11^22)
sage: EllipticCurve('130a1').sha().an_padic(5) # rank 1
1 + O(5)
Non-split, but rank 0 case (:trac:`7331`)::
sage: EllipticCurve('270b1').sha().an_padic(5) # rank 0, long time (2s on sage.math, 2011)
1 + O(5^22)
The output has the correct sign::
sage: EllipticCurve('123a1').sha().an_padic(41) # rank 1, long time (3s on sage.math, 2011)
1 + O(41)
Supersingular cases::
sage: EllipticCurve('34a1').sha().an_padic(5) # rank 0
1 + O(5^22)
sage: EllipticCurve('53a1').sha().an_padic(5) # rank 1, long time (11s on sage.math, 2011)
1 + O(5)
Cases that use a twist to a lower conductor::
sage: EllipticCurve('99a1').sha().an_padic(5)
1 + O(5)
sage: EllipticCurve('240d3').sha().an_padic(5) # sha has 4 elements here
4 + O(5)
sage: EllipticCurve('448c5').sha().an_padic(7,prec=4, use_twists=False) # long time (2s on sage.math, 2011)
2 + 7 + O(7^6)
sage: EllipticCurve([-19,34]).sha().an_padic(5) # see trac #6455, long time (4s on sage.math, 2011)
1 + O(5)
Test for :trac:`15737`::
sage: E = EllipticCurve([-100,0])
sage: s = E.sha()
sage: s.an_padic(13)
1 + O(13^20)
"""
try:
return self.__an_padic[(p, prec)]
except AttributeError:
self.__an_padic = {}
except KeyError:
pass
E = self.Emin
tam = E.tamagawa_product()
tors = E.torsion_order()**2
r = E.rank()
if r > 0:
reg = E.padic_regulator(p)
else:
if E.is_supersingular(p):
reg = vector([Qp(p, 20)(1), 0])
else:
reg = Qp(p, 20)(1)
if use_twists and p > 2:
Et, D = E.minimal_quadratic_twist()
# trac 6455 : we have to assure that the twist back is allowed
D = ZZ(D)
if D % p == 0:
D = ZZ(D/p)
for ell in D.prime_divisors():
if ell % 2 == 1:
if Et.conductor() % ell**2 == 0:
D = ZZ(D/ell)
ve = valuation(D, 2)
de = ZZ((D/2**ve).abs())
if de % 4 == 3:
de = -de
Et = E.quadratic_twist(de)
# now check individually if we can twist by -1 or 2 or -2
Nmin = Et.conductor()
Dmax = de
for DD in [-4*de, 8*de, -8*de]:
Et = E.quadratic_twist(DD)
if Et.conductor() < Nmin and valuation(Et.conductor(), 2) <= valuation(DD, 2):
Nmin = Et.conductor()
Dmax = DD
D = Dmax
Et = E.quadratic_twist(D)
lp = Et.padic_lseries(p)
else:
lp = E.padic_lseries(p)
D = 1
if r == 0 and D == 1:
            # short cut for rank 0 curves, we do not need
            # to compute the p-adic L-function, the leading
# term will be the L-value divided by the Neron
# period.
ms = E.modular_symbol(sign=+1, normalize='L_ratio')
lstar = ms(0)/E.real_components()
bsd = tam/tors
if prec == 0:
# prec = valuation(lstar/bsd, p)
prec = 20
shan = Qp(p, prec=prec + 2)(lstar/bsd)
elif E.is_ordinary(p):
K = reg.parent()
lg = log(K(1 + p))
if (E.is_good(p) or E.ap(p) == -1):
if not E.is_good(p):
eps = 2
else:
eps = (1 - arith.kronecker_symbol(D, p)/lp.alpha())**2
# according to the p-adic BSD this should be equal to the leading term of the p-adic L-series divided by sha:
bsdp = tam * reg * eps/tors/lg**r
else:
r += 1 # exceptional zero
eq = E.tate_curve(p)
Li = eq.L_invariant()
# according to the p-adic BSD (Mazur-Tate-Teitelbaum)
# this should be equal to the leading term of the p-adic L-series divided by sha:
bsdp = tam * reg * Li/tors/lg**r
v = bsdp.valuation()
if v > 0:
verbose("the prime is irregular for this curve.")
# determine how much prec we need to prove at least the
# triviality of the p-primary part of Sha
if prec == 0:
n = max(v, 2)
bounds = lp._prec_bounds(n, r + 1)
while bounds[r] <= v:
n += 1
bounds = lp._prec_bounds(n, r + 1)
verbose("set precision to %s" % n)
else:
n = max(2, prec)
not_yet_enough_prec = True
while not_yet_enough_prec:
lps = lp.series(n, quadratic_twist=D, prec=r + 1)
lstar = lps[r]
if (lstar != 0) or (prec != 0):
not_yet_enough_prec = False
else:
n += 1
verbose("increased precision to %s" % n)
shan = lstar/bsdp
elif E.is_supersingular(p):
K = reg[0].parent()
lg = log(K(1 + p))
# according to the p-adic BSD this should be equal to the leading term of the D_p - valued
# L-series :
bsdp = tam / tors / lg**r * reg
# note this is an element in Q_p^2
verbose("the algebraic leading terms : %s" % bsdp)
v = [bsdp[0].valuation(), bsdp[1].valuation()]
if prec == 0:
n = max(min(v) + 2, 3)
else:
n = max(3, prec)
verbose("...computing the p-adic L-series")
not_yet_enough_prec = True
while not_yet_enough_prec:
lps = lp.Dp_valued_series(n, quadratic_twist=D, prec=r + 1)
lstar = [lps[0][r], lps[1][r]]
verbose("the leading terms : %s" % lstar)
if (lstar[0] != 0 or lstar[1] != 0) or (prec != 0):
not_yet_enough_prec = False
else:
n += 1
verbose("increased precision to %s" % n)
verbose("...putting things together")
if bsdp[0] != 0:
shan0 = lstar[0]/bsdp[0]
else:
shan0 = 0 # this should actually never happen
if bsdp[1] != 0:
shan1 = lstar[1]/bsdp[1]
else:
shan1 = 0 # this should conjecturally only happen when the rank is 0
verbose("the two values for Sha : %s" % [shan0, shan1])
# check consistency (the first two are only here to avoid a bug in the p-adic L-series
# (namely the coefficients of zero-relative precision are treated as zero)
if shan0 != 0 and shan1 != 0 and shan0 - shan1 != 0:
raise RuntimeError("There must be a bug in the supersingular routines for the p-adic BSD.")
# take the better
if shan1 == 0 or shan0.precision_relative() > shan1.precision_relative():
shan = shan0
else:
shan = shan1
else:
raise ValueError("The curve has to have semi-stable reduction at p.")
self.__an_padic[(p, prec)] = shan
return shan
def p_primary_order(self, p):
r"""
Return the order of the `p`-primary part of the Tate-Shafarevich
group.
This uses the result of Skinner and Urban [SU2014]_ on the
main conjecture in Iwasawa theory. In particular the elliptic
curve must have good ordinary reduction at `p`, the residual
Galois representation must be surjective. Furthermore there must
be an auxiliary prime `\ell` dividing the conductor of the curve
exactly once such that the residual representation is ramified
at `p`.
INPUT:
- `p` -- an odd prime
OUTPUT:
- `e` -- a non-negative integer such that `p^e` is the
          order of the `p`-primary part if the conditions are satisfied
and raises a ``ValueError`` otherwise.
EXAMPLES::
sage: E = EllipticCurve("389a1") # rank 2
sage: E.sha().p_primary_order(5)
0
sage: E = EllipticCurve("11a1")
sage: E.sha().p_primary_order(7)
0
sage: E.sha().p_primary_order(5)
Traceback (most recent call last):
...
ValueError: The order is not provably known using Skinner-Urban.
Try running p_primary_bound to get a bound.
"""
E = self.E
# does not work if p = 2
if p == 2:
raise ValueError("{} is not an odd prime".format(p))
if (E.is_ordinary(p) and E.conductor() % p != 0 and
E.galois_representation().is_surjective(p)):
N = E.conductor()
fac = N.factor()
# the auxiliary prime will be one dividing the conductor
if all(E.tate_curve(ell).parameter().valuation() % p == 0
for (ell, e) in fac if e == 1):
raise ValueError("The order is not provably known using Skinner-Urban.\n" +
"Try running p_primary_bound to get a bound.")
else:
raise ValueError("The order is not provably known using Skinner-Urban.\n" +
"Try running p_primary_bound to get a bound.")
return self.p_primary_bound(p)
def p_primary_bound(self, p):
r"""
Return a provable upper bound for the order of the
`p`-primary part `Sha(E)(p)` of the Tate-Shafarevich group.
INPUT:
- ``p`` -- a prime > 2
OUTPUT:
- ``e`` -- a non-negative integer such that `p^e` is an upper
bound for the order of `Sha(E)(p)`
In particular, if this algorithm does not fail, then it proves
that the `p`-primary part of `Sha` is finite. This works also
for curves of rank > 1.
Note also that this bound is sharp if one assumes the main conjecture
of Iwasawa theory of elliptic curves. One may use the method
``p_primary_order`` for checking if the extra conditions hold under
which the main conjecture is known by the work of Skinner and Urban.
This then returns the provable `p`-primary part of the Tate-Shafarevich
        group.
Currently the algorithm is only implemented when the following
conditions are verified:
- The `p`-adic Galois representation must be surjective or
must have its image contained in a Borel subgroup.
- The reduction at `p` is not allowed to be additive.
- If the reduction at `p` is non-split multiplicative, then
the rank must be 0.
- If `p = 3`, then the reduction at 3 must be good ordinary or
split multiplicative, and the rank must be 0.
ALGORITHM:
The algorithm is described in [SW2013]_. The results for the
reducible case can be found in [Wu2004]_. The main ingredient is
Kato's result on the main conjecture in Iwasawa theory.
EXAMPLES::
sage: e = EllipticCurve('11a3')
sage: e.sha().p_primary_bound(3)
0
sage: e.sha().p_primary_bound(5)
0
sage: e.sha().p_primary_bound(7)
0
sage: e.sha().p_primary_bound(11)
0
sage: e.sha().p_primary_bound(13)
0
sage: e = EllipticCurve('389a1')
sage: e.sha().p_primary_bound(5)
0
sage: e.sha().p_primary_bound(7)
0
sage: e.sha().p_primary_bound(11)
0
sage: e.sha().p_primary_bound(13)
0
sage: e = EllipticCurve('858k2')
sage: e.sha().p_primary_bound(3) # long time (10s on sage.math, 2011)
0
Some checks for :trac:`6406` and :trac:`16959`::
sage: e.sha().p_primary_bound(7) # long time
2
sage: E = EllipticCurve('608b1')
sage: E.sha().p_primary_bound(5)
Traceback (most recent call last):
...
ValueError: The p-adic Galois representation is not surjective or reducible. Current knowledge about Euler systems does not provide an upper bound in this case. Try an_padic for a conjectural bound.
sage: E.sha().an_padic(5) # long time
1 + O(5^22)
sage: E = EllipticCurve("5040bi1")
sage: E.sha().p_primary_bound(5) # long time
0
"""
p = Integer(p)
if p == 2:
raise ValueError("The prime p must be odd.")
E = self.Emin
if E.is_ordinary(p) or E.is_good(p):
rho = E.galois_representation()
su = rho.is_surjective(p)
re = rho.is_reducible(p)
if not su and not re:
raise ValueError("The p-adic Galois representation is not surjective or reducible. Current knowledge about Euler systems does not provide an upper bound in this case. Try an_padic for a conjectural bound.")
shan = self.an_padic(p, prec=0, use_twists=True)
if shan == 0:
raise RuntimeError("There is a bug in an_padic.")
S = shan.valuation()
else:
raise ValueError("The curve has to have semi-stable reduction at p.")
return S
def two_selmer_bound(self):
r"""
This returns the 2-rank, i.e. the `\GF{2}`-dimension
of the 2-torsion part of `Sha`, provided we can determine the
rank of `E`.
EXAMPLES::
sage: sh = EllipticCurve('571a1').sha()
sage: sh.two_selmer_bound()
2
sage: sh.an()
4
sage: sh = EllipticCurve('66a1').sha()
sage: sh.two_selmer_bound()
0
sage: sh.an()
1
sage: sh = EllipticCurve('960d1').sha()
sage: sh.two_selmer_bound()
2
sage: sh.an()
4
"""
E = self.Emin
S = E.selmer_rank()
r = E.rank()
t = E.two_torsion_rank()
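        # Editorial note: from 0 -> E(Q)/2E(Q) -> Sel_2(E/Q) -> Sha(E)[2] -> 0
        # and dim_2 E(Q)/2E(Q) = rank + dim_2 E(Q)[2], the 2-rank of Sha(E)[2]
        # is bounded above by S - r - t.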
b = S - r - t
if b < 0:
b = 0
return b
def bound_kolyvagin(self, D=0, regulator=None,
ignore_nonsurj_hypothesis=False):
r"""
Given a fundamental discriminant `D \neq -3,-4` that satisfies the
Heegner hypothesis for `E`, return a list of primes so that
Kolyvagin's theorem (as in Gross's paper) implies that any
prime divisor of `Sha` is in this list.
INPUT:
- ``D`` - (optional) a fundamental discriminant < -4 that satisfies
the Heegner hypothesis for `E`; if not given, use the first such `D`
- ``regulator`` -- (optional) regulator of `E(K)`; if not given, will
be computed (which could take a long time)
- ``ignore_nonsurj_hypothesis`` (optional: default ``False``) --
If ``True``, then gives the bound coming from Heegner point
index, but without any hypothesis on surjectivity
of the mod-`p` representation.
OUTPUT:
- list -- a list of primes such that if `p` divides `Sha(E/K)`, then
`p` is in this list, unless `E/K` has complex multiplication or
analytic rank greater than 2 (in which case we return 0).
- index -- the odd part of the index of the Heegner point in the full
group of `K`-rational points on E. (If `E` has CM, returns 0.)
REMARKS:
1) We do not have to assume that the Manin constant is 1
(or a power of 2). If the Manin constant were
divisible by a prime, that prime would get included in
the list of bad primes.
2) We assume the Gross-Zagier theorem is true under the
hypothesis that `gcd(N,D) = 1`, instead of the stronger
hypothesis `gcd(2\cdot N,D)=1` that is in the original
Gross-Zagier paper. That Gross-Zagier is true when
`gcd(N,D)=1` is "well-known" to the experts, but does not
           seem to be written up well in the literature.
3) Correctness of the computation is guaranteed using
interval arithmetic, under the assumption that the
regulator, square root, and period lattice are
computed to precision at least `10^{-10}`, i.e., they are
correct up to addition or a real number with absolute
value less than `10^{-10}`.
EXAMPLES::
sage: E = EllipticCurve('37a')
sage: E.sha().bound_kolyvagin()
([2], 1)
sage: E = EllipticCurve('141a')
sage: E.sha().an()
1
sage: E.sha().bound_kolyvagin()
([2, 7], 49)
We get no information when the curve has rank 2.::
sage: E = EllipticCurve('389a')
sage: E.sha().bound_kolyvagin()
(0, 0)
sage: E = EllipticCurve('681b')
sage: E.sha().an()
9
sage: E.sha().bound_kolyvagin()
([2, 3], 9)
"""
E = self.Emin
if E.has_cm():
return 0, 0
if D == 0:
D = -5
while not E.satisfies_heegner_hypothesis(D):
D -= 1
if not E.satisfies_heegner_hypothesis(D):
raise ArithmeticError("Discriminant (=%s) must be a fundamental discriminant that satisfies the Heegner hypothesis." % D)
if D == -3 or D == -4:
raise ArithmeticError("Discriminant (=%s) must not be -3 or -4." % D)
eps = E.root_number()
L1_vanishes = E.lseries().L1_vanishes()
if eps == 1 and L1_vanishes:
return 0, 0 # rank even hence >= 2, so Kolyvagin gives nothing.
alpha = sqrt(abs(D)) / (2*E.period_lattice().complex_area())
F = E.quadratic_twist(D)
k_E = 2*sqrt(E.conductor()) + 10
k_F = 2*sqrt(F.conductor()) + 10
# k_E = 2
# k_F = 2
MIN_ERR = 1e-10
# we assume that regulator and
# discriminant, etc., computed to this accuracy.
tries = 0
while True:
tries += 1
if tries >= 6:
raise RuntimeError("Too many precision increases in bound_kolyvagin")
if eps == 1: # E has even rank
verbose("Conductor of twist = %s" % F.conductor())
LF1, err_F = F.lseries().deriv_at1(k_F)
LE1, err_E = E.lseries().at1(k_E)
err_F = max(err_F, MIN_ERR)
err_E = max(err_E, MIN_ERR)
if regulator is not None:
hZ = regulator/2
else:
hZ = F.regulator(use_database=True)/2
I = RIF(alpha) * RIF(LE1-err_E, LE1+err_E) * RIF(LF1-err_F, LF1+err_F) / hZ
else: # E has odd rank
if regulator is not None:
hZ = regulator/2
else:
hZ = E.regulator(use_database=True)/2
LE1, err_E = E.lseries().deriv_at1(k_E)
LF1, err_F = F.lseries().at1(k_F)
err_F = max(err_F, MIN_ERR)
err_E = max(err_E, MIN_ERR)
# I = alpha * LE1 * LF1 / hZ
I = RIF(alpha) * RIF(LE1-err_E, LE1+err_E) * RIF(LF1-err_F, LF1+err_F) / hZ
verbose('interval = %s' % I)
t, n = I.is_int()
if t:
break
elif I.absolute_diameter() < 1:
raise RuntimeError("Problem in bound_kolyvagin; square of index is not an integer -- D=%s, I=%s." % (D, I))
verbose("Doubling bounds")
k_E *= 2
k_F *= 2
# end while
# We include 2 since Kolyvagin (in Gross) says nothing there
if n == 0:
return 0, 0 # no bound
B = [2]
for p, e in factor(n):
if p > 2:
if e % 2:
raise RuntimeError("Problem in bound_kolyvagin; square of index is not a perfect square! D=%s, I=%s, n=%s, e=%s." % (D, I, n, e))
B.append(p)
else:
n /= 2**e # replace n by its odd part
if not ignore_nonsurj_hypothesis:
for p in E.galois_representation().non_surjective():
B.append(p)
B = sorted(set(int(x) for x in B))
return B, n
def bound_kato(self):
r"""
Returns a list of primes `p` such that the theorems of Kato's [Kat2004]_
and others (e.g., as explained in a thesis of Grigor Grigorov [Gri2005]_)
imply that if `p` divides the order of `Sha(E/\QQ)` then `p` is in
the list.
If `L(E,1) = 0`, then this function gives no information, so
it returns ``False``.
THEOREM: Suppose `L(E,1) \neq 0` and `p \neq 2` is a prime such
that
- `E` does not have additive reduction at `p`,
- either the `p`-adic representation is surjective or has its
image contained in a Borel subgroup.
Then `{ord}_p(\#Sha(E))` is bounded from above by the `p`-adic valuation of `L(E,1)\cdot\#E(\QQ)_{tor}^2 / (\Omega_E \cdot \prod c_v)`.
If the L-series vanishes, the method ``p_primary_bound`` can be used instead.
EXAMPLES::
sage: E = EllipticCurve([0, -1, 1, -10, -20]) # 11A = X_0(11)
sage: E.sha().bound_kato()
[2]
sage: E = EllipticCurve([0, -1, 1, 0, 0]) # X_1(11)
sage: E.sha().bound_kato()
[2]
sage: E = EllipticCurve([1,1,1,-352,-2689]) # 66B3
sage: E.sha().bound_kato()
[2]
For the following curve one really has that 25 divides the
order of `Sha` (by [GJPST2009]_)::
sage: E = EllipticCurve([1, -1, 0, -332311, -73733731]) # 1058D1
sage: E.sha().bound_kato() # long time (about 1 second)
[2, 5, 23]
sage: E.galois_representation().non_surjective() # long time (about 1 second)
[]
For this one, `Sha` is divisible by 7::
sage: E = EllipticCurve([0, 0, 0, -4062871, -3152083138]) # 3364C1
sage: E.sha().bound_kato() # long time (< 10 seconds)
[2, 7, 29]
No information about curves of rank > 0::
sage: E = EllipticCurve([0, 0, 1, -1, 0]) # 37A (rank 1)
sage: E.sha().bound_kato()
False
"""
E = self.Emin
if E.has_cm():
return False
if E.lseries().L1_vanishes():
return False
B = [2]
rho = E.galois_representation()
for p in rho.non_surjective():
if p > 2 and p not in rho.reducible_primes():
B.append(p)
for p in E.conductor().prime_divisors():
if E.has_additive_reduction(p) and p not in B:
B.append(p)
# The only other p that might divide B are those that divide
# the integer 2*#E(Q)_tor^2 * L(E,1)/omega. So we compute
# that to sufficient precision to determine it. Note that
# we have to assume the Manin constant is <=2 in order to provably
# compute L(E,1)/omega.
for p, n in factor(self.an()):
if n >= 2: # use parity of Sha
B.append(int(p))
B = sorted(set(B))
return B
def bound(self):
r"""
Compute a provably correct bound on the order of the Tate-Shafarevich
group of this curve. The bound is either ``False`` (no bound) or a
list ``B`` of primes such that any prime divisor of the order of `Sha`
is in this list.
EXAMPLES::
sage: EllipticCurve('37a').sha().bound()
([2], 1)
"""
if self.Emin.lseries().L1_vanishes():
B = self.bound_kolyvagin()
else:
B = self.bound_kato()
return B
| 36.772727
| 222
| 0.534529
|
634a3ef5a0cf24593bf8d998fb8b39b389b229ca
| 1,344
|
py
|
Python
|
tests/002_finder/001_ispartial.py
|
Sam-prog-sudo/boussole
|
5d6ec94356f9a91ff4d6d23c1700d3512b67006a
|
[
"MIT"
] | 13
|
2016-05-19T15:18:41.000Z
|
2022-03-22T15:37:32.000Z
|
tests/002_finder/001_ispartial.py
|
Sam-prog-sudo/boussole
|
5d6ec94356f9a91ff4d6d23c1700d3512b67006a
|
[
"MIT"
] | 38
|
2016-04-07T00:30:58.000Z
|
2022-02-28T13:29:33.000Z
|
tests/002_finder/001_ispartial.py
|
Sam-prog-sudo/boussole
|
5d6ec94356f9a91ff4d6d23c1700d3512b67006a
|
[
"MIT"
] | 3
|
2016-05-20T09:21:57.000Z
|
2020-10-12T10:56:49.000Z
|
# -*- coding: utf-8 -*-
def test_001(settings, finder):
"""finder.ScssFinder: Simple filename"""
partial = finder.is_partial("foo.scss")
assert partial is False
def test_002(settings, finder):
"""finder.ScssFinder: Simple partial filename"""
partial = finder.is_partial("_foo.scss")
assert partial
def test_003(settings, finder):
"""finder.ScssFinder: Simple relative filename"""
partial = finder.is_partial("bar/foo.scss")
assert partial is False
def test_004(settings, finder):
"""finder.ScssFinder: Simple relative partial filename"""
partial = finder.is_partial("bar/_foo.scss")
assert partial
def test_005(settings, finder):
"""finder.ScssFinder: Simple relative filename again"""
partial = finder.is_partial("bar/plop/foo.scss")
assert partial is False
def test_006(settings, finder):
"""finder.ScssFinder: Simple relative partial filename again"""
partial = finder.is_partial("bar/plop/_foo.scss")
assert partial
def test_007(settings, finder):
"""finder.ScssFinder: Simple absolute filename"""
partial = finder.is_partial("/home/bar/foo.scss")
assert partial is False
def test_008(settings, finder):
"""finder.ScssFinder: Simple absolute partial filename"""
partial = finder.is_partial("/home/bar/_foo.scss")
assert partial
| 26.88
| 67
| 0.703869
|
1a014bcdd8d3d0ce808f5f540f7f31b1680ab2e4
| 477
|
py
|
Python
|
libpythonpro/tests/test_spam/test_usuarios.py
|
smkbarbosa/libpythonpro
|
001a3c470b00a93ffa0180ae0c1bbb32fee04993
|
[
"Apache-2.0"
] | 1
|
2022-01-23T06:41:05.000Z
|
2022-01-23T06:41:05.000Z
|
libpythonpro/tests/test_spam/test_usuarios.py
|
smkbarbosa/libpythonpro
|
001a3c470b00a93ffa0180ae0c1bbb32fee04993
|
[
"Apache-2.0"
] | 40
|
2020-12-25T17:58:26.000Z
|
2022-03-19T00:36:16.000Z
|
libpythonpro/tests/test_spam/test_usuarios.py
|
smkbarbosa/libpythonpro
|
001a3c470b00a93ffa0180ae0c1bbb32fee04993
|
[
"Apache-2.0"
] | null | null | null |
from libpythonpro.spam.models import Usuario
def test_salvar_usuario(sessao):
usuario = Usuario(nome='Samuel', email='samuka1@gmail.com')
sessao.salvar(usuario)
assert isinstance(usuario.id, int)
def test_listar_usuario(sessao):
usuarios = [Usuario(nome='Samuel', email='samuka1@gmail.com'),
Usuario(nome='Dante', email='samuka1@gmail.com')]
for usuario in usuarios:
sessao.salvar(usuario)
assert usuarios == sessao.listar()
| 29.8125
| 66
| 0.696017
|
6d2bc0e0ad601ac6a5996245794693c2cb0497fc
| 3,089
|
py
|
Python
|
src/robot/utils/recommendations.py
|
bhirsz/robotframework
|
d62ee5091ed932aee8fc12ae5e340a5b19288f05
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2020-04-22T08:30:52.000Z
|
2020-12-07T08:25:09.000Z
|
src/robot/utils/recommendations.py
|
bhirsz/robotframework
|
d62ee5091ed932aee8fc12ae5e340a5b19288f05
|
[
"ECL-2.0",
"Apache-2.0"
] | 63
|
2020-03-04T17:31:39.000Z
|
2022-03-01T09:12:16.000Z
|
src/robot/utils/recommendations.py
|
bhirsz/robotframework
|
d62ee5091ed932aee8fc12ae5e340a5b19288f05
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-11-30T18:49:45.000Z
|
2018-11-30T18:49:45.000Z
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
class RecommendationFinder(object):
def __init__(self, normalizer=None):
self.normalizer = normalizer or (lambda x: x)
self.recommendations = None
def find_and_format(self, name, candidates, message, max_matches=10):
self.find(name, candidates, max_matches)
return self.format(message)
def find(self, name, candidates, max_matches=10):
"""Return a list of close matches to `name` from `candidates`."""
if not name or not candidates:
return []
norm_name = self.normalizer(name)
norm_candidates = self._get_normalized_candidates(candidates)
cutoff = self._calculate_cutoff(norm_name)
norm_matches = difflib.get_close_matches(
norm_name, norm_candidates, n=max_matches, cutoff=cutoff
)
self.recommendations = self._get_original_candidates(
norm_candidates, norm_matches
)
return self.recommendations
def format(self, message, recommendations=None):
"""Add recommendations to the given message.
The recommendation string looks like::
<message> Did you mean:
<recommendations[0]>
<recommendations[1]>
<recommendations[2]>
"""
recommendations = recommendations or self.recommendations
if recommendations:
message += " Did you mean:"
for rec in recommendations:
message += "\n %s" % rec
return message
def _get_normalized_candidates(self, candidates):
norm_candidates = {}
# sort before normalization for consistent Python/Jython ordering
for cand in sorted(candidates):
norm = self.normalizer(cand)
norm_candidates.setdefault(norm, []).append(cand)
return norm_candidates
def _get_original_candidates(self, norm_candidates, norm_matches):
candidates = []
for norm_match in norm_matches:
candidates.extend(norm_candidates[norm_match])
return candidates
def _calculate_cutoff(self, string, min_cutoff=.5, max_cutoff=.85,
step=.03):
"""Calculate a cutoff depending on string length.
Default values determined by manual tuning until the results
"look right".
"""
cutoff = min_cutoff + len(string) * step
return min(cutoff, max_cutoff)
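A minimal usage sketch of the finder above (not part of the source file; the keyword names and the lowercasing normalizer are illustrative assumptions):
finder = RecommendationFinder(normalizer=lambda s: s.lower())
# Suggest close matches for a misspelled name; the candidates here are hypothetical.
message = finder.find_and_format(
    "Shuold Be Equal",
    ["Should Be Equal", "Should Be True"],
    "No keyword with name 'Shuold Be Equal' found.")
# The returned message ends with " Did you mean:" followed by the matches, one per indented line.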
| 36.77381
| 75
| 0.656847
|
5a1cafe66cb1b394a1dc9665a4ae3454f93a3bd1
| 2,350
|
py
|
Python
|
tests/cpd_als_benchmark_test.py
|
ByzanTine/AutoHOOT
|
007bb423bfc8eefa64e4d1b0f8dad80b440bcf7a
|
[
"Apache-2.0"
] | null | null | null |
tests/cpd_als_benchmark_test.py
|
ByzanTine/AutoHOOT
|
007bb423bfc8eefa64e4d1b0f8dad80b440bcf7a
|
[
"Apache-2.0"
] | null | null | null |
tests/cpd_als_benchmark_test.py
|
ByzanTine/AutoHOOT
|
007bb423bfc8eefa64e4d1b0f8dad80b440bcf7a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autodiff as ad
import backend as T
import pytest
import tensorly as tl
from tensors.synthetic_tensors import init_rand_cp
from examples.cpd import cpd_als, cpd_als_shared_exec
from tensorly.decomposition import parafac
from sktensor import dtensor
from sktensor import cp_als as sk_cp_als
BACKEND_TYPES = ['numpy']
size, rank = 150, 150
dim = 3
@pytest.mark.benchmark(group="cp_als")
def test_cpd_als_tensorly(benchmark):
for datatype in BACKEND_TYPES:
tl.set_backend(datatype)
assert tl.get_backend() == datatype
_, input_tensor_val = init_rand_cp(dim, size, rank)
input_tensor = tl.tensor(input_tensor_val, dtype='float64')
factors = benchmark(parafac,
input_tensor,
rank=rank,
init='random',
tol=0,
n_iter_max=1,
verbose=0)
@pytest.mark.benchmark(group="cp_als")
def test_cpd_als_sktensor(benchmark):
for datatype in BACKEND_TYPES:
_, input_tensor_val = init_rand_cp(dim, size, rank)
benchmark(sk_cp_als,
dtensor(input_tensor_val),
rank=rank,
max_iter=1,
init='random')
@pytest.mark.benchmark(group="cp_als")
def test_cpd_als(benchmark):
for datatype in BACKEND_TYPES:
input_tensor = init_rand_cp(dim, size, rank)
outputs = benchmark(cpd_als, dim, size, rank, 1, input_tensor)
@pytest.mark.benchmark(group="cp_als")
def test_cpd_als_shared_exec(benchmark):
for datatype in BACKEND_TYPES:
input_tensor = init_rand_cp(dim, size, rank)
outputs = benchmark(cpd_als_shared_exec, dim, size, rank, 1, input_tensor)
| 32.638889
| 82
| 0.670213
|
955d15b0e239cf3d3e662cb7e3fff641f7ff4e7b
| 1,017
|
py
|
Python
|
tests/test_metrics.py
|
danirivas/cova-tuner
|
e7eaf7e75f0c15ce35c449fb67529c9c73386817
|
[
"Apache-2.0"
] | 1
|
2022-03-04T09:34:00.000Z
|
2022-03-04T09:34:00.000Z
|
tests/test_metrics.py
|
danirivas/cova-tuner
|
e7eaf7e75f0c15ce35c449fb67529c9c73386817
|
[
"Apache-2.0"
] | 1
|
2021-12-15T14:52:58.000Z
|
2021-12-15T14:52:58.000Z
|
tests/test_metrics.py
|
danirivas/cova-tuner
|
e7eaf7e75f0c15ce35c449fb67529c9c73386817
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from cova.dnn import metrics
@pytest.mark.parametrize(
("bb1", "bb2", "overlap"),
[
[[0, 0, 100, 100], [0, 0, 100, 100], 1.0],
[[0, 0, 50, 50], [0, 0, 100, 100], 1],
[[0, 0, 100, 100], [0, 0, 50, 50], 0.25],
[[0, 0, 100, 100], [0, 0, 100, 50], 0.5],
[[0, 0, 50, 50], [50, 50, 100, 100], 0],
[[100, 100, 200, 200], [150, 150, 160, 160], 0.01],
],
)
def test_get_overlap(bb1, bb2, overlap):
assert metrics.get_overlap(bb1, bb2) == overlap
@pytest.mark.parametrize(
("bb1", "bb2", "iou"),
[
[[0, 0, 100, 100], [0, 0, 100, 100], 1.0],
[[0, 0, 50, 50], [0, 0, 100, 100], 0.25],
[[0, 0, 50, 50], [50, 50, 100, 100], 0],
],
)
def test_get_iou(bb1, bb2, iou):
assert metrics.get_iou(bb1, bb2)[0] == iou
@pytest.mark.parametrize(("bb1", "bb2"), [["1", []], [[0, 0, 10, 10], [0, 10, 10, 0]]])
def test_get_iou_assert(bb1, bb2):
with pytest.raises(AssertionError):
metrics.get_iou(bb1, bb2)
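As a quick sanity check on the expected values above (a hedged arithmetic sketch, assuming boxes are (x1, y1, x2, y2) and IoU means intersection over union):
# The [0, 0, 50, 50] vs [0, 0, 100, 100] case: the smaller box lies entirely inside the larger one.
intersection = 50 * 50                      # 2500
union = 100 * 100 + 50 * 50 - intersection  # 10000
print(intersection / union)                 # 0.25, matching the parametrized expectation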
| 27.486486
| 87
| 0.495575
|
12ed84fb4b49cbed967ff38e96145df084380d22
| 122
|
py
|
Python
|
phishsense/modelqueue/forms.py
|
sidin/phishsense
|
34ec2a6659f2e884fc36c01c80776b161da0be07
|
[
"BSD-3-Clause"
] | null | null | null |
phishsense/modelqueue/forms.py
|
sidin/phishsense
|
34ec2a6659f2e884fc36c01c80776b161da0be07
|
[
"BSD-3-Clause"
] | null | null | null |
phishsense/modelqueue/forms.py
|
sidin/phishsense
|
34ec2a6659f2e884fc36c01c80776b161da0be07
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
class URLForm(forms.Form):
analysis_url = forms.URLField(label='Analysis URL', required=True)
| 20.333333
| 67
| 0.778689
|
6a4af8f83be3d75c1b0689679e117513b08ff28f
| 7,006
|
py
|
Python
|
test/test_full_report_schema_all_of.py
|
intel471/titan-client-python
|
b12a2bc73604cf1a7cb0b6e97c81b5af9dee7bfe
|
[
"MIT"
] | 2
|
2021-08-23T08:41:44.000Z
|
2021-08-29T15:09:27.000Z
|
test/test_full_report_schema_all_of.py
|
intel471/titan-client-python
|
b12a2bc73604cf1a7cb0b6e97c81b5af9dee7bfe
|
[
"MIT"
] | 1
|
2021-09-16T18:12:22.000Z
|
2021-09-22T16:12:11.000Z
|
test/test_full_report_schema_all_of.py
|
intel471/titan-client-python
|
b12a2bc73604cf1a7cb0b6e97c81b5af9dee7bfe
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Titan API v1
# Introduction The Intel 471 API is organized around the principles of REST. Our API lets you gather results from our platform with anything that can send a HTTP request, including cURL and modern internet browsers. Access to this API requires an API token which is managed from your account settings. Intel 471 reserves the right to add fields to our API however we will provide backwards compatibility and older version support so that it will be possible to choose exact versions that provide a response with an older structure. This documentation tracks all API versions and it is possible to compare this version which has changes highlighted. Please consider not storing information provided by API locally as we constantly improving our data set and want you to have the most updated information. # Authentication Authenticate to the Intel 471 API by providing your API key in the request. Your API key carries many privileges so please do not expose them on public web resources. Authentication to the API occurs by providing your email address as the login and API key as password in the authorization header via HTTP Basic Auth. Your API key can be found in the [API](https://portal.intel471.com/api) section on the portal. # Accessing API ## Via internet browser Just open url: `https://api.intel471.com/v1/reports` Browser will ask for credentials, provide your email as login and API key as password. ## Via curl command line utility Type in terminal the following command: ``` curl -u <YOU EMAIL>:<YOUR API KEY> https://api.intel471.com/v1/reports ``` ## CURL usage examples This section covers some Watchers API requests. ### List watcher groups: Type in terminal the following command: *curl -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups* ### Create watcher group: To create watcher group you need to pass a json body to request. Passing json body possible in two ways: #### Write json to request *curl -d'{\"name\": \"group_name\", \"description\": \"Description\"}' -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups* #### Write json to file and call it *curl -d\"@json_file_name\" -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups* ### Create free text search watcher: *curl -d'{\"type\": \"search\", \"freeTextPattern\": \"text to search\", \"notificationChannel\": \"website\"}' -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups/\"GROUP UID\"/watchers* ### Create specific search watcher: *curl -d'{\"type\": \"search\", \"patterns\":[ { \"types\": \"Actor\" , \"pattern\": \"swisman\" } ], \"notificationChannel\": \"website\" }' -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups/\"GROUP UID\"/watchers* ## Via Python Execute the following script: ``` import urllib2, base64 username = \"<YOU EMAIL>\" apikey = \"<YOUR API KEY>\" request = urllib2.Request(\"https://api.intel471.com/v1/reports\") base64string = base64.encodestring('%s:%s' % (username, apikey)).replace('\\n', '') request.add_header(\"Authorization\", \"Basic %s\" % base64string) result = urllib2.urlopen(request) response_in_json = result.read() print response_in_json ``` # API integration best practice with your application When accessing our API from your application don't do AJAX calls directly from web browser to https://api.intel471.com/. We do not allow CORS requests from browser due to potential security issues. 
Instead we suggest you look to establish a kind of a server side proxy in your application which will pass requests to our API. For example: you can send a request from browser javascript to your server side, for instance to url `/apiproxy/actors?actor=hacker` which will be internally passed to `https://api.intel471.com/v1/actors?actor=hacker` (with authentication headers added) and response will be sent back to the browser. # Versioning support We are consistently improving our API and occasionally bring in changes to the API based on customer feedback. The current API version can be seen in the drop down boxes for each version. We are providing API backwards compatibility when possible. All requests are prefixed with the major version number, for example `/v1`: ``` https://api.intel471.com/v1/reports ``` Different major versions are not compatible and imply significant response structure changes. Minor versions differences might include extra fields in response or provide new request parameter support. To stick to the specific version, just add the following extra parameter to the request, for example: `?v=1.2.0`. If you specify a not existing version, it will be brought down to the nearest existing one. For example, parameter `?v=1.5.4` will call API of version 1.3.0 — the latest available; `?v=1.2.9` will awake version 1.2.0 and so on. Omitting the version parameter from your request means you will always use the latest version of the API. We highly recommend you always add the version parameter to be safe on API updates and code your integration in a way to accept possible future extra fields added to the response object. ``` https://api.intel471.com/v1/tags?prettyPrint - will return response for the latest API version (v.1.1.0) https://api.intel471.com/v1/tags?prettyPrint&v=1.1.0 - absolutely the same request with the version explicitly specified https://api.intel471.com/v1/reports?prettyPrint&v=1.0.0 - will return response compatible with the older version ``` # noqa: E501
The version of the OpenAPI document: 1.18.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import titan_client
from titan_client.models.full_report_schema_all_of import FullReportSchemaAllOf # noqa: E501
from titan_client.rest import ApiException
class TestFullReportSchemaAllOf(unittest.TestCase):
"""FullReportSchemaAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test FullReportSchemaAllOf
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = titan_client.models.full_report_schema_all_of.FullReportSchemaAllOf() # noqa: E501
if include_optional :
return FullReportSchemaAllOf(
raw_text = '',
raw_text_translated = '',
researcher_comments = ''
)
else :
return FullReportSchemaAllOf(
raw_text = '',
)
def testFullReportSchemaAllOf(self):
"""Test FullReportSchemaAllOf"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
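The module docstring above shows Basic Auth with the Python 2 urllib2 library; a hedged Python 3 sketch of the same request using the requests package (the email, API key and version pin are placeholders drawn from that description, not from this test file):
import requests

EMAIL = "<YOUR EMAIL>"      # login is the account email, per the docstring
API_KEY = "<YOUR API KEY>"  # password is the API key

response = requests.get(
    "https://api.intel471.com/v1/reports",
    auth=(EMAIL, API_KEY),   # HTTP Basic Auth
    params={"v": "1.18.0"},  # optionally pin the API version, as the docstring recommends
)
response.raise_for_status()
print(response.json())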
| 127.381818
| 5,526
| 0.738224
|
d2a0621dfc4b5a82a4bd263afbfd7a31e9c00b58
| 47
|
py
|
Python
|
jaseci_kit/jaseci_kit/use_enc.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 6
|
2021-10-30T03:35:36.000Z
|
2022-02-10T02:06:18.000Z
|
jaseci_kit/jaseci_kit/use_enc.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 85
|
2021-10-29T22:47:39.000Z
|
2022-03-31T06:11:52.000Z
|
jaseci_kit/jaseci_kit/use_enc.py
|
Gorgeous-Patrick/jaseci
|
b423165fefbbc9574cd4467ee05728add7f47e5a
|
[
"MIT"
] | 12
|
2021-11-03T17:29:22.000Z
|
2022-03-30T16:01:53.000Z
|
from .modules.use_enc.use_enc import * # noqa
| 23.5
| 46
| 0.744681
|
52091e4c08e04730fa8855eec21e9f7041fb964b
| 890
|
py
|
Python
|
setup.py
|
AnnikaCodes/ps-client
|
3abd518491b0b06a2463a1f251d4effebf468ef5
|
[
"MIT"
] | 2
|
2020-07-03T18:19:25.000Z
|
2020-07-27T03:43:03.000Z
|
setup.py
|
AnnikaCodes/ps-client
|
3abd518491b0b06a2463a1f251d4effebf468ef5
|
[
"MIT"
] | null | null | null |
setup.py
|
AnnikaCodes/ps-client
|
3abd518491b0b06a2463a1f251d4effebf468ef5
|
[
"MIT"
] | null | null | null |
"""setup.py
sets up the module
by Annika, template from https://packaging.python.org/tutorials/packaging-projects/"""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="ps-client",
version="0.0.8",
author="Annika",
author_email="annika0uwu@gmail.com",
description="A package for interactions with the Pokémon Showdown simulator.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/AnnikaCodes/ps-client",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"pytz",
"requests",
"websockets"
],
python_requires='>=3.6',
)
| 27.8125
| 90
| 0.653933
|
370d309157fe8ad9d484c2b9de43c1de7cdececc
| 7,899
|
py
|
Python
|
backend/fireflame_34285/settings.py
|
crowdbotics-apps/fireflame-34285
|
95d1c69b1210cbc9853704e014971823f8df25a8
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/fireflame_34285/settings.py
|
crowdbotics-apps/fireflame-34285
|
95d1c69b1210cbc9853704e014971823f8df25a8
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/fireflame_34285/settings.py
|
crowdbotics-apps/fireflame-34285
|
95d1c69b1210cbc9853704e014971823f8df25a8
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
Django settings for fireflame_34285 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fireflame_34285.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fireflame_34285.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
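One consequence of the ordering above, sketched for clarity (an observation about the settings, not code from the file; effective_storage is an illustrative name): because the GCP block is evaluated after the S3 block, setting both the AWS_* variables and GS_BUCKET_NAME leaves Google Cloud Storage as the active backend.
# Hedged sketch of the storage selection implied by the assignments above.
if GS_BUCKET_NAME:
    effective_storage = "storages.backends.gcloud.GoogleCloudStorage"  # assigned last, so it wins
elif USE_S3:
    effective_storage = DEFAULT_FILE_STORAGE  # defaults to home.storage_backends.MediaStorage
else:
    effective_storage = "django.core.files.storage.FileSystemStorage"  # Django's built-in default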
| 30.380769
| 112
| 0.736802
|
8758b24b7a4ee53ad8353c79ad018f8764fe8dba
| 19,528
|
py
|
Python
|
service/surf/vendor/elasticsearch/api.py
|
nppo/search-portal
|
aedf21e334f178c049f9d6cf37cafd6efc07bc0d
|
[
"MIT"
] | 1
|
2022-01-10T00:26:12.000Z
|
2022-01-10T00:26:12.000Z
|
service/surf/vendor/elasticsearch/api.py
|
nppo/search-portal
|
aedf21e334f178c049f9d6cf37cafd6efc07bc0d
|
[
"MIT"
] | 48
|
2021-11-11T13:43:09.000Z
|
2022-03-30T11:33:37.000Z
|
service/surf/vendor/elasticsearch/api.py
|
nppo/search-portal
|
aedf21e334f178c049f9d6cf37cafd6efc07bc0d
|
[
"MIT"
] | null | null | null |
import boto3
from collections import defaultdict
from django.conf import settings
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
from project.configuration import SEARCH_FIELDS
from surf.vendor.elasticsearch.serializers import SearchResultSerializer
class ElasticSearchApiClient:
def __init__(self, elastic_url=settings.ELASTICSEARCH_HOST):
protocol = settings.ELASTICSEARCH_PROTOCOL
protocol_config = {}
if protocol == "https":
protocol_config = {
"scheme": "https",
"port": 443,
"use_ssl": True,
"verify_certs": settings.ELASTICSEARCH_VERIFY_CERTS,
}
if settings.IS_AWS:
credentials = boto3.Session().get_credentials()
http_auth = AWS4Auth(credentials.access_key, credentials.secret_key, "eu-central-1", "es",
session_token=credentials.token)
else:
http_auth = (None, None)
self.client = Elasticsearch(
[elastic_url],
http_auth=http_auth,
connection_class=RequestsHttpConnection,
**protocol_config
)
self.index_nl = settings.ELASTICSEARCH_NL_INDEX
self.index_en = settings.ELASTICSEARCH_EN_INDEX
self.index_unk = settings.ELASTICSEARCH_UNK_INDEX
self.languages = {
"nl": self.index_nl,
"en": self.index_en
}
@staticmethod
def parse_elastic_result(search_result):
"""
Parses the elasticsearch search result into the format that is also used by the edurep endpoint.
This allows quick switching between elastic and edurep without changing code.
:param search_result: result from elasticsearch
:return result: list of results in edurep format
"""
hits = search_result.pop("hits")
aggregations = search_result.get("aggregations", {})
result = dict()
result['recordcount'] = hits['total']['value']
# Transform aggregations into drilldowns
drilldowns = []
for aggregation_name, aggregation in aggregations.items():
buckets = aggregation["filtered"]["buckets"] if "filtered" in aggregation else aggregation["buckets"]
items = [
{
"external_id": bucket["key"],
"count": bucket["doc_count"]
}
for bucket in buckets
]
drilldowns.append({
"external_id": aggregation_name,
"items": items
})
result['drilldowns'] = drilldowns
# Parse spelling suggestions
did_you_mean = {}
if 'suggest' in search_result:
spelling_suggestion = search_result['suggest']['did-you-mean-suggestion'][0]
spelling_option = spelling_suggestion['options'][0] if len(spelling_suggestion['options']) else None
if spelling_option is not None and spelling_option["score"] >= 0.01:
did_you_mean = {
'original': spelling_suggestion['text'],
'suggestion': spelling_option['text']
}
result['did_you_mean'] = did_you_mean
# Transform hits into records
result['records'] = [
ElasticSearchApiClient.parse_elastic_hit(hit)
for hit in hits['hits']
]
return result
@staticmethod
def parse_elastic_hit(hit, transform=True):
"""
Parses the elasticsearch search hit into the format that is also used by the edurep endpoint.
It's mostly just mapping the variables we need into the places that we expect them to be.
:param hit: result from elasticsearch
:return record: parsed record in elasticsearch format
"""
data = hit["_source"]
serializer = SearchResultSerializer()
# Basic mapping between field and data (excluding any method fields with a source of "*")
field_mapping = {
field.source: field_name if transform else field.source
for field_name, field in serializer.fields.items() if field.source != "*"
}
record = {
field_mapping[field]: value
for field, value in data.items() if field in field_mapping
}
# Reformatting some fields if a relations field is desired
if "relations" in field_mapping:
publishers = [{"name": publisher} for publisher in data.get("publishers", [])]
record["relations"] = {
"authors": data.get("authors", []),
"parties": data.get("parties", publishers),
"projects": data.get("projects", []),
"keywords": [{"label": keyword} for keyword in data.get("keywords", [])],
"themes": [{"label": theme} for theme in data.get("research_themes", [])],
"parents": data.get("is_part_of", []),
"children": data.get("has_parts", [])
}
# Calling methods on serializers to set data for method fields
for field_name, field in serializer.fields.items():
if field.source != "*":
continue
record[field_name] = getattr(serializer, field.method_name)(data)
return record
def autocomplete(self, query):
"""
Use the elasticsearch suggest query to get typing hints during searching.
:param query: the input from the user so far
:return: a list of options matching the input query, sorted by length
"""
# build the query for elasticsearch.
query_dictionary = {
'suggest': {
"autocomplete": {
'text': query,
"completion": {
"field": "suggest_completion",
"size": 100
}
}
}
}
result = self.client.search(
index=[self.index_nl, self.index_en, self.index_unk],
body=query_dictionary
)
# extract the options from the elasticsearch result, remove duplicates,
# remove non-matching prefixes (elastic will suggest things that don't match _exactly_)
# and sort by length
autocomplete = result['suggest']['autocomplete']
options = autocomplete[0]['options']
flat_options = list(set([item for option in options for item in option['_source']['suggest_completion']]))
options_with_prefix = [option for option in flat_options if option.startswith(query)]
options_with_prefix.sort(key=lambda option: len(option))
return options_with_prefix
def drilldowns(self, drilldown_names, search_text=None, filters=None):
"""
        This function is named drilldowns because it's also named drilldowns in the original edurep search code.
        It passes on information to search, and returns the search result without the records.
        This allows calculation of 'item counts' (i.e. how many results there are when a certain filter is applied).
"""
search_results = self.search(search_text=search_text, filters=filters, drilldown_names=drilldown_names)
search_results["records"] = []
return search_results
def search(self, search_text, drilldown_names=None, filters=None, ordering=None, page=1, page_size=5):
"""
Build and send a query to elasticsearch and parse it before returning.
:param search_text: A list of strings to search for.
:param drilldown_names: A list of the 'drilldowns' (filters) that are to be counted by elasticsearch.
:param filters: The filters that are applied for this search.
:param ordering: Sort the results by this ordering (or use default elastic ordering otherwise)
:param page: The page index of the results
:param page_size: How many items are loaded per page.
:return:
"""
start_record = page_size * (page - 1)
body = {
'query': {
"bool": defaultdict(list)
},
'from': start_record,
'size': page_size,
'post_filter': {
"bool": defaultdict(list)
}
}
if search_text:
query_string = {
"simple_query_string": {
"fields": SEARCH_FIELDS,
"query": search_text,
"default_operator": "and"
}
}
body["query"]["bool"]["must"] += [query_string]
body["query"]["bool"]["should"] = {
"distance_feature": {
"field": "publisher_date",
"pivot": "90d",
"origin": "now",
"boost": 1.15
}
}
body["suggest"] = {
'did-you-mean-suggestion': {
'text': search_text,
'phrase': {
'field': 'suggest_phrase',
'size': 1,
'gram_size': 3,
'direct_generator': [{
'field': 'suggest_phrase',
'suggest_mode': 'always'
}],
},
}
}
            indices = self.parse_index_language(filters)
if drilldown_names:
body["aggs"] = self.parse_aggregations(drilldown_names, filters)
filters = self.parse_filters(filters)
if filters:
body["post_filter"]["bool"]["must"] += filters
if ordering:
body["sort"] = [
self.parse_ordering(ordering),
"_score"
]
# make query and parse
result = self.client.search(
index=indices,
body=body
)
return self.parse_elastic_result(result)
def get_materials_by_id(self, external_ids, page=1, page_size=10, **kwargs):
"""
Retrieve specific materials from elastic through their external id.
:param external_ids: the id's of the materials to retrieve
:param page: The page index of the results
:param page_size: How many items are loaded per page.
:return: a list of search results (like a regular search).
"""
start_record = page_size * (page - 1)
normalized_external_ids = []
for external_id in external_ids:
if not external_id.startswith("surf"):
normalized_external_ids.append(external_id)
else:
external_id_parts = external_id.split(":")
normalized_external_ids.append(external_id_parts[-1])
result = self.client.search(
index=[self.index_nl, self.index_en, self.index_unk],
body={
"query": {
"bool": {
"must": [{"terms": {"external_id": normalized_external_ids}}]
}
},
'from': start_record,
'size': page_size,
},
)
results = self.parse_elastic_result(result)
materials = {
material["external_id"]: material
for material in results["records"]
}
records = []
for external_id in normalized_external_ids:
if external_id not in materials:
continue
records.append(materials[external_id])
results["recordcount"] = len(records)
results["records"] = records
return results
def stats(self):
stats = self.client.count(index=",".join([self.index_nl, self.index_en, self.index_unk]))
return stats.get("count", 0)
def more_like_this(self, external_id, language):
index = self.languages.get(language, self.index_unk)
body = {
"query": {
"more_like_this": {
"fields": ["title", "description"],
"like": [
{
"_index": index,
"_id": external_id
}
],
"min_term_freq": 1,
"max_query_terms": 12
}
}
}
search_result = self.client.search(
index=index,
body=body
)
hits = search_result.pop("hits")
result = dict()
result["records_total"] = hits["total"]["value"]
result["results"] = [
ElasticSearchApiClient.parse_elastic_hit(hit, transform=False)
for hit in hits["hits"]
]
return result
def author_suggestions(self, author_name):
body = {
"query": {
"bool": {
"must": {
"multi_match": {
"fields": [field for field in SEARCH_FIELDS if "authors" not in field],
"query": author_name,
},
},
"must_not": {
"match": {"authors.name.folded": author_name}
}
}
}
}
search_result = self.client.search(
index=[self.index_nl, self.index_en, self.index_unk],
body=body
)
hits = search_result.pop("hits")
result = dict()
result["records_total"] = hits["total"]["value"]
result["results"] = [
ElasticSearchApiClient.parse_elastic_hit(hit, transform=False)
for hit in hits["hits"]
]
return result
@staticmethod
def parse_filters(filters):
"""
Parse filters from the edurep format into the elastic query format.
Not every filter is handled by elastic in the same way so it's a lot of manual parsing.
:param filters: the list of filters to be parsed
:return: the filters in the format for an elasticsearch query.
"""
if not filters:
return {}
filter_items = []
for filter_item in filters:
# skip filter_items that are empty
# and the language filter item (it's handled by telling elastic in what index to search).
if not filter_item['items'] or 'lom.general.language' in filter_item['external_id']:
continue
elastic_type = ElasticSearchApiClient.translate_external_id_to_elastic_type(filter_item['external_id'])
# date range query
if elastic_type == "publisher_date":
lower_bound, upper_bound = filter_item["items"]
if lower_bound is not None or upper_bound is not None:
filter_items.append({
"range": {
"publisher_date": {
"gte": lower_bound,
"lte": upper_bound
}
}
})
# all other filter types are handled by just using elastic terms with the 'translated' filter items
else:
filter_items.append({
"terms": {
elastic_type: filter_item["items"]
}
})
return filter_items
def parse_aggregations(self, aggregation_names, filters):
"""
Parse the aggregations so elastic can count the items properly.
        :param aggregation_names: the names of the aggregations to count
:param filters: the filters for the query
:return:
"""
aggregation_items = {}
for aggregation_name in aggregation_names:
other_filters = []
if filters:
other_filters = list(filter(lambda x: x['external_id'] != aggregation_name, filters))
other_filters = self.parse_filters(other_filters)
elastic_type = ElasticSearchApiClient.translate_external_id_to_elastic_type(aggregation_name)
if len(other_filters) > 0:
# Filter the aggregation by the filters applied to other categories
aggregation_items[aggregation_name] = {
"filter": {
"bool": {
"must": other_filters
}
},
"aggs": {
"filtered": {
"terms": {
"field": elastic_type,
"size": 500,
}
}
},
}
else:
aggregation_items[aggregation_name] = {
"terms": {
"field": elastic_type,
"size": 500,
}
}
return aggregation_items
@staticmethod
def parse_ordering(ordering):
"""
Parse the ordering format ('asc', 'desc' or None) into the type that elasticsearch expects.
"""
order = "asc"
if ordering.startswith("-"):
order = "desc"
ordering = ordering[1:]
elastic_type = ElasticSearchApiClient.translate_external_id_to_elastic_type(ordering)
return {elastic_type: {"order": order}}
    def parse_index_language(self, filters):
"""
Select the index to search on based on language.
"""
# if no language is selected, search on both.
indices = [self.index_nl, self.index_en, self.index_unk]
if not filters:
return indices
language_item = [filter_item for filter_item in filters if filter_item['external_id'] == 'lom.general.language']
if not language_item:
return indices
language_indices = [f"latest-{language}" for language in language_item[0]['items']]
return language_indices if len(language_indices) else indices
@staticmethod
def translate_external_id_to_elastic_type(external_id):
""" The external id's used in edurep need to be parsed to fields in elasticsearch. """
if external_id == 'lom.technical.format':
return 'technical_type'
elif external_id == 'about.repository':
return 'harvest_source'
elif external_id == 'lom.rights.copyrightandotherrestrictions':
return 'copyright.keyword'
elif external_id == 'lom.educational.context':
return 'lom_educational_levels'
elif external_id == 'lom.lifecycle.contribute.publisherdate':
return 'publisher_date'
elif external_id == 'lom.classification.obk.discipline.id':
return 'disciplines'
elif external_id == 'lom.lifecycle.contribute.author':
return 'authors.name.keyword'
elif external_id == 'lom.general.language':
return 'language.keyword'
elif external_id == 'lom.general.aggregationlevel':
return 'aggregation_level'
elif external_id == 'lom.lifecycle.contribute.publisher':
return 'publishers.keyword'
return external_id
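A minimal usage sketch of the client above (assuming the Django settings and the Elasticsearch indices referenced in __init__ are configured; the query text and filter values are illustrative):
client = ElasticSearchApiClient()

# Free-text search using the edurep-style filter format expected by parse_filters().
results = client.search(
    search_text="machine learning",
    filters=[{"external_id": "lom.technical.format", "items": ["video"]}],
    page=1,
    page_size=5,
)
print(results["recordcount"], len(results["records"]))

# Typing hints while a query is still being entered.
print(client.autocomplete("mach"))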
| 39.212851
| 120
| 0.545678
|
c6b614b4823fc11e5d9c7275c261f6a66e6c021d
| 13,932
|
py
|
Python
|
contrib/gitian-build.py
|
blenda123/labx
|
2d6495c3e93fe6e6b734d5bc183d09821a3d0991
|
[
"MIT"
] | 3
|
2019-07-14T15:43:43.000Z
|
2020-09-17T04:01:50.000Z
|
contrib/gitian-build.py
|
blenda123/labx
|
2d6495c3e93fe6e6b734d5bc183d09821a3d0991
|
[
"MIT"
] | null | null | null |
contrib/gitian-build.py
|
blenda123/labx
|
2d6495c3e93fe6e6b734d5bc183d09821a3d0991
|
[
"MIT"
] | 5
|
2019-04-30T22:18:20.000Z
|
2020-07-02T21:20:02.000Z
|
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/LABX-Project/gitian.sigs.git'])
if not os.path.isdir('LABX-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/LABX-Project/LABX-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('LABX'):
subprocess.check_call(['git', 'clone', 'https://github.com/LABX-Project/LABX.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
global args, workdir
os.makedirs('LABX-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'http://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(['make', '-C', '../LABX/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'labx='+args.commit, '--url', 'labx='+args.url, '../LABX/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../LABX/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/labx-*.tar.gz build/out/src/labx-*.tar.gz ../LABX-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'labx='+args.commit, '--url', 'labx='+args.url, '../LABX/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../LABX/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/labx-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/labx-*.zip build/out/labx-*.exe ../LABX-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'labx='+args.commit, '--url', 'labx='+args.url, '../LABX/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../LABX/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/labx-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/labx-*.tar.gz build/out/labx-*.dmg ../LABX-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'config', 'user.signingkey', args.signer])
if args.linux:
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
if args.windows:
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
if args.macos:
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call('cp inputs/LABX-' + args.version + '-win-unsigned.tar.gz inputs/LABX-win-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../LABX/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../LABX/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/labx-*win64-setup.exe ../LABX-binaries/'+args.version, shell=True)
subprocess.check_call('mv build/out/labx-*win32-setup.exe ../LABX-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/LABX-' + args.version + '-osx-unsigned.tar.gz inputs/LABX-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../LABX/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../LABX/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/labx-osx-signed.dmg ../LABX-binaries/'+args.version+'/LABX-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
if args.windows:
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
if args.macos:
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-S', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
if args.linux:
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../LABX/contrib/gitian-descriptors/gitian-linux.yml'])
if args.windows:
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../LABX/contrib/gitian-descriptors/gitian-win.yml'])
if args.sign:
print('\nVerifying v'+args.version+' Signed Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../LABX/contrib/gitian-descriptors/gitian-win-signer.yml'])
if args.macos:
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../LABX/contrib/gitian-descriptors/gitian-osx.yml'])
if args.sign:
print('\nVerifying v'+args.version+' Signed MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../LABX/contrib/gitian-descriptors/gitian-osx-signer.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/LABX-Project/LABX', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
# Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
    if 'GITIAN_HOST_IP' not in os.environ:
        os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
    if 'LXC_GUEST_IP' not in os.environ:
        os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
if args.setup:
setup()
os.chdir('LABX')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/LABX')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
verify()
if __name__ == '__main__':
main()
| 55.951807
| 231
| 0.641545
|
cbfbf6edd363af8df55d8558e0ee275bd8db2e73
| 11,516
|
py
|
Python
|
tests-asyncio/test_asyncio_based_service.py
|
cburgdorf/async-service
|
04bbca368b0dfdeb87e8f9ff871ffb23a27b01ee
|
[
"MIT"
] | null | null | null |
tests-asyncio/test_asyncio_based_service.py
|
cburgdorf/async-service
|
04bbca368b0dfdeb87e8f9ff871ffb23a27b01ee
|
[
"MIT"
] | null | null | null |
tests-asyncio/test_asyncio_based_service.py
|
cburgdorf/async-service
|
04bbca368b0dfdeb87e8f9ff871ffb23a27b01ee
|
[
"MIT"
] | null | null | null |
import asyncio
import pytest
from async_service import (
AsyncioManager,
DaemonTaskExit,
Service,
as_service,
background_asyncio_service,
)
class WaitCancelledService(Service):
async def run(self) -> None:
await self.manager.wait_cancelled()
async def do_service_lifecycle_check(
manager, manager_run_fn, trigger_exit_condition_fn, should_be_cancelled
):
assert manager.is_started is False
assert manager.is_running is False
assert manager.is_cancelled is False
assert manager.is_stopping is False
assert manager.is_finished is False
asyncio.ensure_future(manager_run_fn())
await asyncio.wait_for(manager.wait_started(), timeout=0.1)
assert manager.is_started is True
assert manager.is_running is True
assert manager.is_cancelled is False
assert manager.is_stopping is False
assert manager.is_finished is False
# trigger the service to exit
trigger_exit_condition_fn()
await asyncio.wait_for(manager.wait_stopping(), timeout=0.1)
if should_be_cancelled:
assert manager.is_started is True
# We cannot determine whether the service should be running at this
# stage because a service is considered running until it is marked as
# stopping. Since it may be cancelled but still not stopped we
# can't know.
assert manager.is_cancelled is True
assert manager.is_stopping is True
# We cannot determine whether a service should be finished at this
# stage as it could have exited cleanly and is now finished or it
# might be doing some cleanup after which it will register as being
# finished.
await asyncio.wait_for(manager.wait_finished(), timeout=0.1)
assert manager.is_started is True
assert manager.is_running is False
assert manager.is_cancelled is should_be_cancelled
assert manager.is_stopping is False
assert manager.is_finished is True
def test_service_manager_initial_state():
service = WaitCancelledService()
manager = AsyncioManager(service)
assert manager.is_started is False
assert manager.is_running is False
assert manager.is_cancelled is False
assert manager.is_finished is False
@pytest.mark.asyncio
async def test_asyncio_service_lifecycle_run_and_clean_exit():
trigger_exit = asyncio.Event()
@as_service
async def ServiceTest(manager):
await trigger_exit.wait()
service = ServiceTest()
manager = AsyncioManager(service)
await do_service_lifecycle_check(
manager=manager,
manager_run_fn=manager.run,
trigger_exit_condition_fn=trigger_exit.set,
should_be_cancelled=False,
)
@pytest.mark.asyncio
async def test_asyncio_service_lifecycle_run_and_external_cancellation():
@as_service
async def ServiceTest(manager):
while True:
await asyncio.sleep(0)
service = ServiceTest()
manager = AsyncioManager(service)
await do_service_lifecycle_check(
manager=manager,
manager_run_fn=manager.run,
trigger_exit_condition_fn=manager.cancel,
should_be_cancelled=True,
)
@pytest.mark.asyncio
async def test_asyncio_service_lifecycle_run_and_exception():
trigger_error = asyncio.Event()
@as_service
async def ServiceTest(manager):
await trigger_error.wait()
raise RuntimeError("Service throwing error")
service = ServiceTest()
manager = AsyncioManager(service)
async def do_service_run():
with pytest.raises(RuntimeError, match="Service throwing error"):
await manager.run()
await do_service_lifecycle_check(
manager=manager,
manager_run_fn=do_service_run,
trigger_exit_condition_fn=trigger_error.set,
should_be_cancelled=True,
)
@pytest.mark.asyncio
async def test_asyncio_service_lifecycle_run_and_task_exception():
trigger_error = asyncio.Event()
@as_service
async def ServiceTest(manager):
async def task_fn():
await trigger_error.wait()
raise RuntimeError("Service throwing error")
manager.run_task(task_fn)
service = ServiceTest()
manager = AsyncioManager(service)
async def do_service_run():
with pytest.raises(RuntimeError, match="Service throwing error"):
await manager.run()
await do_service_lifecycle_check(
manager=manager,
manager_run_fn=do_service_run,
trigger_exit_condition_fn=trigger_error.set,
should_be_cancelled=True,
)
@pytest.mark.asyncio
async def test_asyncio_service_lifecycle_run_and_daemon_task_exit():
trigger_error = asyncio.Event()
@as_service
async def ServiceTest(manager):
async def daemon_task_fn():
await trigger_error.wait()
manager.run_daemon_task(daemon_task_fn)
service = ServiceTest()
manager = AsyncioManager(service)
async def do_service_run():
with pytest.raises(DaemonTaskExit, match="Daemon task"):
await manager.run()
await do_service_lifecycle_check(
manager=manager,
manager_run_fn=do_service_run,
trigger_exit_condition_fn=trigger_error.set,
should_be_cancelled=True,
)
@pytest.mark.asyncio
async def test_asyncio_service_background_service_context_manager():
service = WaitCancelledService()
async with background_asyncio_service(service) as manager:
# ensure the manager property is set.
assert hasattr(service, "manager")
assert service.manager is manager
assert manager.is_started is True
assert manager.is_running is True
assert manager.is_cancelled is False
assert manager.is_finished is False
assert manager.is_started is True
assert manager.is_running is False
assert manager.is_cancelled is True
assert manager.is_finished is True
@pytest.mark.asyncio
async def test_asyncio_service_manager_stop():
service = WaitCancelledService()
async with background_asyncio_service(service) as manager:
assert manager.is_started is True
assert manager.is_running is True
assert manager.is_cancelled is False
assert manager.is_finished is False
await manager.stop()
assert manager.is_started is True
assert manager.is_running is False
assert manager.is_cancelled is True
assert manager.is_finished is True
@pytest.mark.asyncio
async def test_asyncio_service_manager_run_task():
task_event = asyncio.Event()
@as_service
async def RunTaskService(manager):
async def task_fn():
task_event.set()
manager.run_task(task_fn)
await manager.wait_cancelled()
async with background_asyncio_service(RunTaskService()):
await asyncio.wait_for(task_event.wait(), timeout=0.1)
@pytest.mark.asyncio
async def test_asyncio_service_manager_run_task_waits_for_task_completion():
task_event = asyncio.Event()
@as_service
async def RunTaskService(manager):
async def task_fn():
await asyncio.sleep(0.01)
task_event.set()
manager.run_task(task_fn)
# the task is set to run in the background but then the service exits.
        # We want to be sure that the task is allowed to continue until
        # completion unless explicitly cancelled.
async with background_asyncio_service(RunTaskService()):
await asyncio.wait_for(task_event.wait(), timeout=0.1)
@pytest.mark.asyncio
async def test_asyncio_service_manager_run_task_can_still_cancel_after_run_finishes():
task_event = asyncio.Event()
service_finished = asyncio.Event()
@as_service
async def RunTaskService(manager):
async def task_fn():
# this will never complete
await task_event.wait()
manager.run_task(task_fn)
# the task is set to run in the background but then the service exits.
        # We want to be sure that the task is allowed to continue until
        # completion unless explicitly cancelled.
service_finished.set()
async with background_asyncio_service(RunTaskService()) as manager:
await asyncio.wait_for(service_finished.wait(), timeout=0.01)
# show that the service hangs waiting for the task to complete.
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(manager.wait_stopping(), timeout=0.01)
# trigger cancellation and see that the service actually stops
manager.cancel()
await asyncio.wait_for(manager.wait_stopping(), timeout=0.01)
@pytest.mark.asyncio
async def test_asyncio_service_manager_run_task_reraises_exceptions():
task_event = asyncio.Event()
@as_service
async def RunTaskService(manager):
async def task_fn():
await task_event.wait()
raise Exception("task exception in run_task")
manager.run_task(task_fn)
await asyncio.wait_for(asyncio.sleep(100), timeout=1)
with pytest.raises(BaseException, match="task exception in run_task"):
async with background_asyncio_service(RunTaskService()) as manager:
task_event.set()
await manager.wait_stopping()
pass
@pytest.mark.asyncio
async def test_asyncio_service_manager_run_daemon_task_cancels_if_exits():
task_event = asyncio.Event()
@as_service
async def RunTaskService(manager):
async def daemon_task_fn():
await task_event.wait()
manager.run_daemon_task(daemon_task_fn, name="daemon_task_fn")
await asyncio.wait_for(asyncio.sleep(100), timeout=1)
with pytest.raises(DaemonTaskExit, match="Daemon task daemon_task_fn exited"):
async with background_asyncio_service(RunTaskService()) as manager:
task_event.set()
await manager.wait_stopping()
@pytest.mark.asyncio
async def test_asyncio_service_manager_propogates_and_records_exceptions():
@as_service
async def ThrowErrorService(manager):
raise RuntimeError("this is the error")
service = ThrowErrorService()
manager = AsyncioManager(service)
assert manager.did_error is False
with pytest.raises(RuntimeError, match="this is the error"):
await manager.run()
assert manager.did_error is True
@pytest.mark.asyncio
async def test_asyncio_service_lifecycle_run_and_clean_exit_with_child_service():
trigger_exit = asyncio.Event()
@as_service
async def ChildServiceTest(manager):
await trigger_exit.wait()
@as_service
async def ServiceTest(manager):
child_manager = manager.run_child_service(ChildServiceTest())
await child_manager.wait_started()
service = ServiceTest()
manager = AsyncioManager(service)
await do_service_lifecycle_check(
manager=manager,
manager_run_fn=manager.run,
trigger_exit_condition_fn=trigger_exit.set,
should_be_cancelled=False,
)
@pytest.mark.asyncio
async def test_asyncio_service_with_async_generator():
is_within_agen = asyncio.Event()
async def do_agen():
while True:
yield
@as_service
async def ServiceTest(manager):
async for _ in do_agen(): # noqa: F841
await asyncio.sleep(0)
is_within_agen.set()
async with background_asyncio_service(ServiceTest()) as manager:
await is_within_agen.wait()
manager.cancel()
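# A minimal, non-test sketch of the usage pattern exercised by the tests above:
# run a decorated service in the background, then cancel it and wait for it to
# finish. IdleService is a hypothetical example; the helpers (as_service,
# background_asyncio_service) are the same ones the tests use.
async def _example_background_service_usage():
    @as_service
    async def IdleService(manager):
        # block until the manager is cancelled, like WaitCancelledService
        await manager.wait_cancelled()

    async with background_asyncio_service(IdleService()) as manager:
        assert manager.is_running is True
        manager.cancel()
        await manager.wait_finished()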
c5e715aa368c5f69b46b85168ee56a146a3eb0b9 | 5559 | py | Python | syft/generic/object_storage.py | NicoSerranoP/PySyft | 87fcd566c46fce4c16d363c94396dd26bd82a016 | ["Apache-2.0"]
from collections import defaultdict
from typing import Union
from syft.exceptions import ObjectNotFoundError
from syft.generic.frameworks.types import FrameworkTensor
from syft.generic.frameworks.types import FrameworkTensorType
from syft.generic.abstract.tensor import AbstractTensor
from syft.workers.abstract import AbstractWorker
class ObjectStore:
"""A storage of objects identifiable by their id.
A wrapper object to a collection of objects where all objects
are stored using their IDs as keys.
"""
def __init__(self, owner: AbstractWorker = None):
self.owner = owner
# This is the collection of objects being stored.
self._objects = {}
# This is an index to retrieve objects from their tags in an efficient way
self._tag_to_object_ids = defaultdict(set)
@property
def _tensors(self):
return {id_: obj for id_, obj in self._objects.items() if isinstance(obj, FrameworkTensor)}
def register_obj(self, obj: object, obj_id: Union[str, int] = None):
"""Registers the specified object with the current worker node.
Selects an id for the object, assigns a list of owners, and establishes
whether it's a pointer or not. This method is generally not used by the
client and is instead used by internal processes (hooks and workers).
Args:
obj: A torch Tensor or Variable object to be registered.
obj_id (int or string): random integer between 0 and 1e10 or
string uniquely identifying the object.
"""
if obj_id is not None and hasattr(obj, "id"):
obj.id = obj_id
self.set_obj(obj)
def de_register_obj(self, obj: object, _recurse_torch_objs: bool = True):
"""Deregisters the specified object.
Deregister and remove attributes which are indicative of registration.
Args:
obj: A torch Tensor or Variable object to be deregistered.
_recurse_torch_objs: A boolean indicating whether the object is
more complex and needs to be explored. Is not supported at the
moment.
"""
if hasattr(obj, "id"):
self.rm_obj(obj.id)
if hasattr(obj, "_owner"):
del obj._owner
def get_obj(self, obj_id: Union[str, int]) -> object:
"""Returns the object from registry.
Look up an object from the registry using its ID.
Args:
obj_id: A string or integer id of an object to look up.
Returns:
Object with id equals to `obj_id`.
"""
try:
obj = self._objects[obj_id]
except KeyError as e:
if obj_id not in self._objects:
raise ObjectNotFoundError(obj_id, self)
else:
raise e
return obj
def set_obj(self, obj: Union[FrameworkTensorType, AbstractTensor]) -> None:
"""Adds an object to the registry of objects.
Args:
obj: A torch or syft tensor with an id.
"""
obj.owner = self.owner
self._objects[obj.id] = obj
# Add entry in the tag index
if obj.tags:
for tag in obj.tags:
if tag not in self._tag_to_object_ids:
self._tag_to_object_ids[tag] = {obj.id}
else:
self._tag_to_object_ids[tag].add(obj.id)
def rm_obj(self, obj_id: Union[str, int], force=False):
"""Removes an object.
Remove the object from the permanent object registry if it exists.
Args:
obj_id: A string or integer representing id of the object to be
removed.
force: if true, explicitly forces removal of the object modifying the
`garbage_collect_data` attribute.
"""
if obj_id in self._objects:
obj = self._objects[obj_id]
# update tag index
if obj.tags:
for tag in obj.tags:
                    if tag in self._tag_to_object_ids:
self._tag_to_object_ids[tag].remove(obj.id)
if force and hasattr(obj, "child") and hasattr(obj.child, "garbage_collect_data"):
obj.child.garbage_collect_data = True
del self._objects[obj_id]
def force_rm_obj(self, obj_id: Union[str, int]):
self.rm_obj(obj_id, force=True)
def clear_objects(self):
"""Removes all objects from the object storage."""
self._objects.clear()
def current_objects(self):
"""Returns a copy of the objects in the object storage."""
return self._objects.copy()
def find_by_id(self, id):
"""Local search by id"""
return self._objects.get(id)
def find_by_tag(self, tag):
"""Local search by tag
Args:
tag (str): exact tag searched
Return:
A list of results, possibly empty
"""
if tag in self._tag_to_object_ids:
results = []
for obj_id in self._tag_to_object_ids[tag]:
obj = self.find_by_id(obj_id)
if obj is not None:
results.append(obj)
return results
return []
def register_tags(self, obj):
# NOTE: this is a fix to correct faulty registration that can sometimes happen
if obj.id not in self._objects:
self.owner.register_obj(obj)
for tag in obj.tags:
self._tag_to_object_ids[tag].add(obj.id)
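# A minimal illustrative sketch (not part of PySyft) of the ObjectStore API
# defined above, using a hypothetical stand-in object that only provides the
# attributes set_obj relies on (id, tags, owner).
class _FakeStoredObject:
    def __init__(self, id, tags=None):
        self.id = id
        self.tags = tags or []
        self.owner = None

def _example_object_store_usage():
    store = ObjectStore(owner=None)
    obj = _FakeStoredObject(id=1, tags=["#weights"])
    store.set_obj(obj)
    assert store.find_by_id(1) is obj
    assert store.find_by_tag("#weights") == [obj]
    store.rm_obj(1)
    assert store.find_by_id(1) is None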
43509855ed58236941b50167586487fdff5873d9 | 2939 | py | Python | src/model/strict_balance.py | CVbluecat/TIPS | 823974f418bbd970b1f5be07f6ff5de4653f717e | ["MIT"]
from queue import Queue
from generateAST import TreeNode
import copy
# traverse the tree to find the defect node
def BFSGetNode(ast, num):
nodeQ = Queue()
nodeQ.put(ast) # root
while(not nodeQ.empty()):
tmpNode = nodeQ.get()
if tmpNode.beginPoint == num:
return tmpNode
for childNode in tmpNode.children:
nodeQ.put(childNode)
return None
def repair_strict_balance(ast, charno):
for num in charno:
try:
defectNode = BFSGetNode(ast, num)
# find this.balance
thisNode = None
nodeQ = Queue()
nodeQ.put(defectNode) # root
while(not nodeQ.empty()):
tmpNode = nodeQ.get()
if 'value' in tmpNode.attributes.keys() and tmpNode.attributes['value'] == 'this':
thisNode = tmpNode
for childNode in tmpNode.children:
nodeQ.put(childNode)
binaryOpNode = thisNode.father.father
binaryOpNode1 = copy.deepcopy(binaryOpNode)
binaryOpNode2 = copy.deepcopy(binaryOpNode)
# >=
binaryOpNode1.attributes['operator'] = '>='
binaryOpNode2.attributes['operator'] = '<'
origvalNode = binaryOpNode2.children[1] # maybe behind is an expression
tmpBinaryNode = TreeNode('BinaryOperation')
tmpBinaryNode.beginPoint = -1
tmpBinaryNode.attributes = {}
tmpBinaryNode.attributes['operator'] = '+'
oneNode = TreeNode('Literal')
oneNode.attributes = {}
oneNode.attributes['value'] = '1'
oneNode.attributes['token'] = 'number'
tmpBinaryNode.children.append(origvalNode)
tmpBinaryNode.children.append(oneNode)
secTuple = TreeNode('TupleExpression')
secTuple.attributes = {'isInlineArray':False}
secTuple.children.append(tmpBinaryNode)
binaryOpNode2.children[1] = secTuple
binaryOpNodeTotal = TreeNode('BinaryOperation')
binaryOpNodeTotal.attributes = {}
binaryOpNodeTotal.attributes['operator'] = '&&'
tupleNode1 = TreeNode('TupleExpression')
tupleNode1.attributes = {'isInlineArray':False}
tupleNode1.children.append(binaryOpNode1)
tupleNode2 = TreeNode('TupleExpression')
tupleNode2.attributes = {'isInlineArray':False}
tupleNode2.children.append(binaryOpNode2)
binaryOpNodeTotal.children.append(tupleNode1)
binaryOpNodeTotal.children.append(tupleNode2)
upNode = binaryOpNode.father
idx = upNode.children.index(binaryOpNode)
upNode.children[idx] = binaryOpNodeTotal
        except Exception:
            print('failed to repair the strict balance defect in charnum: ' + str(num))
continue
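# A minimal, hypothetical sketch of how BFSGetNode locates a node by its
# character offset. It assumes TreeNode(name) initializes an empty children
# list, as the construction code above suggests; the node names and offsets
# here are made up for illustration.
def _example_bfs_lookup():
    root = TreeNode('SourceUnit')
    root.beginPoint = 0
    target = TreeNode('BinaryOperation')
    target.beginPoint = 42
    root.children.append(target)
    assert BFSGetNode(root, 42) is target
    assert BFSGetNode(root, 99) is None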
078accd634f124f1b431bbddc173f3d152aa0d0e | 16846 | py | Python | synapse/handlers/account_validity.py | warricksothr/synapse | 1de26b346796ec8d6b51b4395017f8107f640c47 | ["Apache-2.0"] | stars: 2 | issues: 1
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import email.mime.multipart
import email.utils
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
from twisted.web.http import Request
from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import UserID
from synapse.util import stringutils
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# Types for callbacks to be registered via the module api
IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
# to `/_synapse/client/account_validity`. See `register_account_validity_callbacks`.
ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]
class AccountValidityHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.config = hs.config
self.store = self.hs.get_datastore()
self.send_email_handler = self.hs.get_send_email_handler()
self.clock = self.hs.get_clock()
self._app_name = self.hs.config.email_app_name
self._account_validity_enabled = (
hs.config.account_validity.account_validity_enabled
)
self._account_validity_renew_by_email_enabled = (
hs.config.account_validity.account_validity_renew_by_email_enabled
)
self._account_validity_period = None
if self._account_validity_enabled:
self._account_validity_period = (
hs.config.account_validity.account_validity_period
)
if (
self._account_validity_enabled
and self._account_validity_renew_by_email_enabled
):
# Don't do email-specific configuration if renewal by email is disabled.
self._template_html = (
hs.config.account_validity.account_validity_template_html
)
self._template_text = (
hs.config.account_validity.account_validity_template_text
)
self._renew_email_subject = (
hs.config.account_validity.account_validity_renew_email_subject
)
# Check the renewal emails to send and send them every 30min.
if hs.config.run_background_tasks:
self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)
self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
self._on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = []
self._on_legacy_send_mail_callback: Optional[
ON_LEGACY_SEND_MAIL_CALLBACK
] = None
self._on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None
# The legacy admin requests callback isn't a protected attribute because we need
# to access it from the admin servlet, which is outside of this handler.
self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None
def register_account_validity_callbacks(
self,
is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None,
on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
):
"""Register callbacks from module for each hook."""
if is_user_expired is not None:
self._is_user_expired_callbacks.append(is_user_expired)
if on_user_registration is not None:
self._on_user_registration_callbacks.append(on_user_registration)
# The builtin account validity feature exposes 3 endpoints (send_mail, renew, and
# an admin one). As part of moving the feature into a module, we need to change
# the path from /_matrix/client/unstable/account_validity/... to
# /_synapse/client/account_validity, because:
#
# * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix
# * the way we register servlets means that modules can't register resources
# under /_matrix/client
#
# We need to allow for a transition period between the old and new endpoints
# in order to allow for clients to update (and for emails to be processed).
#
# Once the email-account-validity module is loaded, it will take control of account
# validity by moving the rows from our `account_validity` table into its own table.
#
# Therefore, we need to allow modules (in practice just the one implementing the
# email-based account validity) to temporarily hook into the legacy endpoints so we
# can route the traffic coming into the old endpoints into the module, which is
# why we have the following three temporary hooks.
if on_legacy_send_mail is not None:
if self._on_legacy_send_mail_callback is not None:
raise RuntimeError("Tried to register on_legacy_send_mail twice")
self._on_legacy_send_mail_callback = on_legacy_send_mail
if on_legacy_renew is not None:
if self._on_legacy_renew_callback is not None:
raise RuntimeError("Tried to register on_legacy_renew twice")
self._on_legacy_renew_callback = on_legacy_renew
if on_legacy_admin_request is not None:
if self.on_legacy_admin_request_callback is not None:
raise RuntimeError("Tried to register on_legacy_admin_request twice")
self.on_legacy_admin_request_callback = on_legacy_admin_request
async def is_user_expired(self, user_id: str) -> bool:
"""Checks if a user has expired against third-party modules.
Args:
user_id: The user to check the expiry of.
Returns:
Whether the user has expired.
"""
for callback in self._is_user_expired_callbacks:
expired = await callback(user_id)
if expired is not None:
return expired
if self._account_validity_enabled:
# If no module could determine whether the user has expired and the legacy
# configuration is enabled, fall back to it.
return await self.store.is_account_expired(user_id, self.clock.time_msec())
return False
async def on_user_registration(self, user_id: str):
"""Tell third-party modules about a user's registration.
Args:
user_id: The ID of the newly registered user.
"""
for callback in self._on_user_registration_callbacks:
await callback(user_id)
@wrap_as_background_process("send_renewals")
async def _send_renewal_emails(self) -> None:
"""Gets the list of users whose account is expiring in the amount of time
configured in the ``renew_at`` parameter from the ``account_validity``
configuration, and sends renewal emails to all of these users as long as they
have an email 3PID attached to their account.
"""
expiring_users = await self.store.get_users_expiring_soon()
if expiring_users:
for user in expiring_users:
await self._send_renewal_email(
user_id=user["user_id"], expiration_ts=user["expiration_ts_ms"]
)
async def send_renewal_email_to_user(self, user_id: str) -> None:
"""
Send a renewal email for a specific user.
Args:
user_id: The user ID to send a renewal email for.
Raises:
SynapseError if the user is not set to renew.
"""
# If a module supports sending a renewal email from here, do that, otherwise do
# the legacy dance.
if self._on_legacy_send_mail_callback is not None:
await self._on_legacy_send_mail_callback(user_id)
return
if not self._account_validity_renew_by_email_enabled:
raise AuthError(
403, "Account renewal via email is disabled on this server."
)
expiration_ts = await self.store.get_expiration_ts_for_user(user_id)
# If this user isn't set to be expired, raise an error.
if expiration_ts is None:
raise SynapseError(400, "User has no expiration time: %s" % (user_id,))
await self._send_renewal_email(user_id, expiration_ts)
async def _send_renewal_email(self, user_id: str, expiration_ts: int) -> None:
"""Sends out a renewal email to every email address attached to the given user
with a unique link allowing them to renew their account.
Args:
user_id: ID of the user to send email(s) to.
expiration_ts: Timestamp in milliseconds for the expiration date of
this user's account (used in the email templates).
"""
addresses = await self._get_email_addresses_for_user(user_id)
# Stop right here if the user doesn't have at least one email address.
# In this case, they will have to ask their server admin to renew their
# account manually.
# We don't need to do a specific check to make sure the account isn't
# deactivated, as a deactivated account isn't supposed to have any
# email address attached to it.
if not addresses:
return
try:
user_display_name = await self.store.get_profile_displayname(
UserID.from_string(user_id).localpart
)
if user_display_name is None:
user_display_name = user_id
except StoreError:
user_display_name = user_id
renewal_token = await self._get_renewal_token(user_id)
url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % (
self.hs.config.public_baseurl,
renewal_token,
)
template_vars = {
"display_name": user_display_name,
"expiration_ts": expiration_ts,
"url": url,
}
html_text = self._template_html.render(**template_vars)
plain_text = self._template_text.render(**template_vars)
for address in addresses:
raw_to = email.utils.parseaddr(address)[1]
await self.send_email_handler.send_email(
email_address=raw_to,
subject=self._renew_email_subject,
app_name=self._app_name,
html=html_text,
text=plain_text,
)
await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True)
async def _get_email_addresses_for_user(self, user_id: str) -> List[str]:
"""Retrieve the list of email addresses attached to a user's account.
Args:
user_id: ID of the user to lookup email addresses for.
Returns:
Email addresses for this account.
"""
threepids = await self.store.user_get_threepids(user_id)
addresses = []
for threepid in threepids:
if threepid["medium"] == "email":
addresses.append(threepid["address"])
return addresses
async def _get_renewal_token(self, user_id: str) -> str:
"""Generates a 32-byte long random string that will be inserted into the
user's renewal email's unique link, then saves it into the database.
Args:
user_id: ID of the user to generate a string for.
Returns:
The generated string.
Raises:
StoreError(500): Couldn't generate a unique string after 5 attempts.
"""
attempts = 0
while attempts < 5:
try:
renewal_token = stringutils.random_string(32)
await self.store.set_renewal_token_for_user(user_id, renewal_token)
return renewal_token
except StoreError:
attempts += 1
raise StoreError(500, "Couldn't generate a unique string as refresh string.")
async def renew_account(self, renewal_token: str) -> Tuple[bool, bool, int]:
"""Renews the account attached to a given renewal token by pushing back the
expiration date by the current validity period in the server's configuration.
If it turns out that the token is valid but has already been used, then the
token is considered stale. A token is stale if the 'token_used_ts_ms' db column
is non-null.
This method exists to support handling the legacy account validity /renew
endpoint. If a module implements the on_legacy_renew callback, then this process
is delegated to the module instead.
Args:
renewal_token: Token sent with the renewal request.
Returns:
A tuple containing:
* A bool representing whether the token is valid and unused.
* A bool which is `True` if the token is valid, but stale.
* An int representing the user's expiry timestamp as milliseconds since the
epoch, or 0 if the token was invalid.
"""
# If a module supports triggering a renew from here, do that, otherwise do the
# legacy dance.
if self._on_legacy_renew_callback is not None:
return await self._on_legacy_renew_callback(renewal_token)
try:
(
user_id,
current_expiration_ts,
token_used_ts,
) = await self.store.get_user_from_renewal_token(renewal_token)
except StoreError:
return False, False, 0
# Check whether this token has already been used.
if token_used_ts:
logger.info(
"User '%s' attempted to use previously used token '%s' to renew account",
user_id,
renewal_token,
)
return False, True, current_expiration_ts
logger.debug("Renewing an account for user %s", user_id)
# Renew the account. Pass the renewal_token here so that it is not cleared.
# We want to keep the token around in case the user attempts to renew their
# account with the same token twice (clicking the email link twice).
#
# In that case, the token will be accepted, but the account's expiration ts
# will remain unchanged.
new_expiration_ts = await self.renew_account_for_user(
user_id, renewal_token=renewal_token
)
return True, False, new_expiration_ts
async def renew_account_for_user(
self,
user_id: str,
expiration_ts: Optional[int] = None,
email_sent: bool = False,
renewal_token: Optional[str] = None,
) -> int:
"""Renews the account attached to a given user by pushing back the
expiration date by the current validity period in the server's
configuration.
Args:
user_id: The ID of the user to renew.
expiration_ts: New expiration date. Defaults to now + validity period.
email_sent: Whether an email has been sent for this validity period.
renewal_token: Token sent with the renewal request. The user's token
will be cleared if this is None.
Returns:
New expiration date for this account, as a timestamp in
milliseconds since epoch.
"""
now = self.clock.time_msec()
if expiration_ts is None:
expiration_ts = now + self._account_validity_period
await self.store.set_account_validity_for_user(
user_id=user_id,
expiration_ts=expiration_ts,
email_sent=email_sent,
renewal_token=renewal_token,
token_used_ts=now,
)
return expiration_ts
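# A minimal, hypothetical sketch (not how any real module is wired up) showing
# the shape of callbacks a module could pass to
# register_account_validity_callbacks on the handler above. The callback
# bodies are placeholders.
async def _example_is_user_expired(user_id: str) -> Optional[bool]:
    # Returning None defers the decision to other callbacks or to the legacy
    # account_validity configuration.
    return None

async def _example_on_user_registration(user_id: str) -> None:
    logger.info("Example module saw registration of %s", user_id)

def _example_register_module_callbacks(handler: AccountValidityHandler) -> None:
    handler.register_account_validity_callbacks(
        is_user_expired=_example_is_user_expired,
        on_user_registration=_example_on_user_registration,
    )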
f2438a7574393cdc20a3d3f44a7ecb13bec52e1e | 51836 | py | Python | tests/test_tenacity.py | zbentley/tenacity | 58fbe614433212e27484f70ed7984f33b1bdb707 | ["Apache-2.0"]
# Copyright 2016–2021 Julien Danjou
# Copyright 2016 Joshua Harlow
# Copyright 2013 Ray Holder
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import sys
import time
import typing
import unittest
import warnings
from contextlib import contextmanager
from copy import copy
from fractions import Fraction
import pytest
import tenacity
from tenacity import RetryCallState, RetryError, Retrying, retry
_unset = object()
def _make_unset_exception(func_name, **kwargs):
missing = []
for k, v in kwargs.items():
if v is _unset:
missing.append(k)
missing_str = ", ".join(repr(s) for s in missing)
return TypeError(func_name + " func missing parameters: " + missing_str)
def _set_delay_since_start(retry_state, delay):
# Ensure outcome_timestamp - start_time is *exactly* equal to the delay to
# avoid complexity in test code.
retry_state.start_time = Fraction(retry_state.start_time)
retry_state.outcome_timestamp = retry_state.start_time + Fraction(delay)
assert retry_state.seconds_since_start == delay
def make_retry_state(previous_attempt_number, delay_since_first_attempt, last_result=None):
"""Construct RetryCallState for given attempt number & delay.
Only used in testing and thus is extra careful about timestamp arithmetics.
"""
required_parameter_unset = previous_attempt_number is _unset or delay_since_first_attempt is _unset
if required_parameter_unset:
raise _make_unset_exception(
"wait/stop",
previous_attempt_number=previous_attempt_number,
delay_since_first_attempt=delay_since_first_attempt,
)
retry_state = RetryCallState(None, None, (), {})
retry_state.attempt_number = previous_attempt_number
if last_result is not None:
retry_state.outcome = last_result
else:
retry_state.set_result(None)
_set_delay_since_start(retry_state, delay_since_first_attempt)
return retry_state
class TestBase(unittest.TestCase):
def test_repr(self):
class ConcreteRetrying(tenacity.BaseRetrying):
def __call__(self, fn, *args, **kwargs):
pass
repr(ConcreteRetrying())
class TestStopConditions(unittest.TestCase):
def test_never_stop(self):
r = Retrying()
self.assertFalse(r.stop(make_retry_state(3, 6546)))
def test_stop_any(self):
stop = tenacity.stop_any(tenacity.stop_after_delay(1), tenacity.stop_after_attempt(4))
def s(*args):
return stop(make_retry_state(*args))
self.assertFalse(s(1, 0.1))
self.assertFalse(s(2, 0.2))
self.assertFalse(s(2, 0.8))
self.assertTrue(s(4, 0.8))
self.assertTrue(s(3, 1.8))
self.assertTrue(s(4, 1.8))
def test_stop_all(self):
stop = tenacity.stop_all(tenacity.stop_after_delay(1), tenacity.stop_after_attempt(4))
def s(*args):
return stop(make_retry_state(*args))
self.assertFalse(s(1, 0.1))
self.assertFalse(s(2, 0.2))
self.assertFalse(s(2, 0.8))
self.assertFalse(s(4, 0.8))
self.assertFalse(s(3, 1.8))
self.assertTrue(s(4, 1.8))
def test_stop_or(self):
stop = tenacity.stop_after_delay(1) | tenacity.stop_after_attempt(4)
def s(*args):
return stop(make_retry_state(*args))
self.assertFalse(s(1, 0.1))
self.assertFalse(s(2, 0.2))
self.assertFalse(s(2, 0.8))
self.assertTrue(s(4, 0.8))
self.assertTrue(s(3, 1.8))
self.assertTrue(s(4, 1.8))
def test_stop_and(self):
stop = tenacity.stop_after_delay(1) & tenacity.stop_after_attempt(4)
def s(*args):
return stop(make_retry_state(*args))
self.assertFalse(s(1, 0.1))
self.assertFalse(s(2, 0.2))
self.assertFalse(s(2, 0.8))
self.assertFalse(s(4, 0.8))
self.assertFalse(s(3, 1.8))
self.assertTrue(s(4, 1.8))
def test_stop_after_attempt(self):
r = Retrying(stop=tenacity.stop_after_attempt(3))
self.assertFalse(r.stop(make_retry_state(2, 6546)))
self.assertTrue(r.stop(make_retry_state(3, 6546)))
self.assertTrue(r.stop(make_retry_state(4, 6546)))
def test_stop_after_delay(self):
r = Retrying(stop=tenacity.stop_after_delay(1))
self.assertFalse(r.stop(make_retry_state(2, 0.999)))
self.assertTrue(r.stop(make_retry_state(2, 1)))
self.assertTrue(r.stop(make_retry_state(2, 1.001)))
def test_legacy_explicit_stop_type(self):
Retrying(stop="stop_after_attempt")
def test_stop_func_with_retry_state(self):
def stop_func(retry_state):
rs = retry_state
return rs.attempt_number == rs.seconds_since_start
r = Retrying(stop=stop_func)
self.assertFalse(r.stop(make_retry_state(1, 3)))
self.assertFalse(r.stop(make_retry_state(100, 99)))
self.assertTrue(r.stop(make_retry_state(101, 101)))
class TestWaitConditions(unittest.TestCase):
def test_no_sleep(self):
r = Retrying()
self.assertEqual(0, r.wait(make_retry_state(18, 9879)))
def test_fixed_sleep(self):
r = Retrying(wait=tenacity.wait_fixed(1))
self.assertEqual(1, r.wait(make_retry_state(12, 6546)))
def test_incrementing_sleep(self):
r = Retrying(wait=tenacity.wait_incrementing(start=500, increment=100))
self.assertEqual(500, r.wait(make_retry_state(1, 6546)))
self.assertEqual(600, r.wait(make_retry_state(2, 6546)))
self.assertEqual(700, r.wait(make_retry_state(3, 6546)))
def test_random_sleep(self):
r = Retrying(wait=tenacity.wait_random(min=1, max=20))
times = set()
for x in range(1000):
times.add(r.wait(make_retry_state(1, 6546)))
# this is kind of non-deterministic...
self.assertTrue(len(times) > 1)
for t in times:
self.assertTrue(t >= 1)
self.assertTrue(t < 20)
def test_random_sleep_without_min(self):
r = Retrying(wait=tenacity.wait_random(max=2))
times = set()
times.add(r.wait(make_retry_state(1, 6546)))
times.add(r.wait(make_retry_state(1, 6546)))
times.add(r.wait(make_retry_state(1, 6546)))
times.add(r.wait(make_retry_state(1, 6546)))
# this is kind of non-deterministic...
self.assertTrue(len(times) > 1)
for t in times:
self.assertTrue(t >= 0)
self.assertTrue(t <= 2)
def test_exponential(self):
r = Retrying(wait=tenacity.wait_exponential())
self.assertEqual(r.wait(make_retry_state(1, 0)), 1)
self.assertEqual(r.wait(make_retry_state(2, 0)), 2)
self.assertEqual(r.wait(make_retry_state(3, 0)), 4)
self.assertEqual(r.wait(make_retry_state(4, 0)), 8)
self.assertEqual(r.wait(make_retry_state(5, 0)), 16)
self.assertEqual(r.wait(make_retry_state(6, 0)), 32)
self.assertEqual(r.wait(make_retry_state(7, 0)), 64)
self.assertEqual(r.wait(make_retry_state(8, 0)), 128)
def test_exponential_with_max_wait(self):
r = Retrying(wait=tenacity.wait_exponential(max=40))
self.assertEqual(r.wait(make_retry_state(1, 0)), 1)
self.assertEqual(r.wait(make_retry_state(2, 0)), 2)
self.assertEqual(r.wait(make_retry_state(3, 0)), 4)
self.assertEqual(r.wait(make_retry_state(4, 0)), 8)
self.assertEqual(r.wait(make_retry_state(5, 0)), 16)
self.assertEqual(r.wait(make_retry_state(6, 0)), 32)
self.assertEqual(r.wait(make_retry_state(7, 0)), 40)
self.assertEqual(r.wait(make_retry_state(8, 0)), 40)
self.assertEqual(r.wait(make_retry_state(50, 0)), 40)
def test_exponential_with_min_wait(self):
r = Retrying(wait=tenacity.wait_exponential(min=20))
self.assertEqual(r.wait(make_retry_state(1, 0)), 20)
self.assertEqual(r.wait(make_retry_state(2, 0)), 20)
self.assertEqual(r.wait(make_retry_state(3, 0)), 20)
self.assertEqual(r.wait(make_retry_state(4, 0)), 20)
self.assertEqual(r.wait(make_retry_state(5, 0)), 20)
self.assertEqual(r.wait(make_retry_state(6, 0)), 32)
self.assertEqual(r.wait(make_retry_state(7, 0)), 64)
self.assertEqual(r.wait(make_retry_state(8, 0)), 128)
self.assertEqual(r.wait(make_retry_state(20, 0)), 524288)
def test_exponential_with_max_wait_and_multiplier(self):
r = Retrying(wait=tenacity.wait_exponential(max=50, multiplier=1))
self.assertEqual(r.wait(make_retry_state(1, 0)), 1)
self.assertEqual(r.wait(make_retry_state(2, 0)), 2)
self.assertEqual(r.wait(make_retry_state(3, 0)), 4)
self.assertEqual(r.wait(make_retry_state(4, 0)), 8)
self.assertEqual(r.wait(make_retry_state(5, 0)), 16)
self.assertEqual(r.wait(make_retry_state(6, 0)), 32)
self.assertEqual(r.wait(make_retry_state(7, 0)), 50)
self.assertEqual(r.wait(make_retry_state(8, 0)), 50)
self.assertEqual(r.wait(make_retry_state(50, 0)), 50)
def test_exponential_with_min_wait_and_multiplier(self):
r = Retrying(wait=tenacity.wait_exponential(min=20, multiplier=2))
self.assertEqual(r.wait(make_retry_state(1, 0)), 20)
self.assertEqual(r.wait(make_retry_state(2, 0)), 20)
self.assertEqual(r.wait(make_retry_state(3, 0)), 20)
self.assertEqual(r.wait(make_retry_state(4, 0)), 20)
self.assertEqual(r.wait(make_retry_state(5, 0)), 32)
self.assertEqual(r.wait(make_retry_state(6, 0)), 64)
self.assertEqual(r.wait(make_retry_state(7, 0)), 128)
self.assertEqual(r.wait(make_retry_state(8, 0)), 256)
self.assertEqual(r.wait(make_retry_state(20, 0)), 1048576)
def test_exponential_with_min_wait_and_max_wait(self):
r = Retrying(wait=tenacity.wait_exponential(min=10, max=100))
self.assertEqual(r.wait(make_retry_state(1, 0)), 10)
self.assertEqual(r.wait(make_retry_state(2, 0)), 10)
self.assertEqual(r.wait(make_retry_state(3, 0)), 10)
self.assertEqual(r.wait(make_retry_state(4, 0)), 10)
self.assertEqual(r.wait(make_retry_state(5, 0)), 16)
self.assertEqual(r.wait(make_retry_state(6, 0)), 32)
self.assertEqual(r.wait(make_retry_state(7, 0)), 64)
self.assertEqual(r.wait(make_retry_state(8, 0)), 100)
self.assertEqual(r.wait(make_retry_state(9, 0)), 100)
self.assertEqual(r.wait(make_retry_state(20, 0)), 100)
def test_legacy_explicit_wait_type(self):
Retrying(wait="exponential_sleep")
def test_wait_func(self):
def wait_func(retry_state):
return retry_state.attempt_number * retry_state.seconds_since_start
r = Retrying(wait=wait_func)
self.assertEqual(r.wait(make_retry_state(1, 5)), 5)
self.assertEqual(r.wait(make_retry_state(2, 11)), 22)
self.assertEqual(r.wait(make_retry_state(10, 100)), 1000)
def test_wait_combine(self):
r = Retrying(wait=tenacity.wait_combine(tenacity.wait_random(0, 3), tenacity.wait_fixed(5)))
        # Test it a few times since it's random
for i in range(1000):
w = r.wait(make_retry_state(1, 5))
self.assertLess(w, 8)
self.assertGreaterEqual(w, 5)
def test_wait_double_sum(self):
r = Retrying(wait=tenacity.wait_random(0, 3) + tenacity.wait_fixed(5))
        # Test it a few times since it's random
for i in range(1000):
w = r.wait(make_retry_state(1, 5))
self.assertLess(w, 8)
self.assertGreaterEqual(w, 5)
def test_wait_triple_sum(self):
r = Retrying(wait=tenacity.wait_fixed(1) + tenacity.wait_random(0, 3) + tenacity.wait_fixed(5))
        # Test it a few times since it's random
for i in range(1000):
w = r.wait(make_retry_state(1, 5))
self.assertLess(w, 9)
self.assertGreaterEqual(w, 6)
def test_wait_arbitrary_sum(self):
r = Retrying(
wait=sum(
[
tenacity.wait_fixed(1),
tenacity.wait_random(0, 3),
tenacity.wait_fixed(5),
tenacity.wait_none(),
]
)
)
        # Test it a few times since it's random
for i in range(1000):
w = r.wait(make_retry_state(1, 5))
self.assertLess(w, 9)
self.assertGreaterEqual(w, 6)
def _assert_range(self, wait, min_, max_):
self.assertLess(wait, max_)
self.assertGreaterEqual(wait, min_)
def _assert_inclusive_range(self, wait, low, high):
self.assertLessEqual(wait, high)
self.assertGreaterEqual(wait, low)
def _assert_inclusive_epsilon(self, wait, target, epsilon):
self.assertLessEqual(wait, target + epsilon)
self.assertGreaterEqual(wait, target - epsilon)
def test_wait_chain(self):
r = Retrying(
wait=tenacity.wait_chain(
*[tenacity.wait_fixed(1) for i in range(2)]
+ [tenacity.wait_fixed(4) for i in range(2)]
+ [tenacity.wait_fixed(8) for i in range(1)]
)
)
for i in range(10):
w = r.wait(make_retry_state(i + 1, 1))
if i < 2:
self._assert_range(w, 1, 2)
elif i < 4:
self._assert_range(w, 4, 5)
else:
self._assert_range(w, 8, 9)
def test_wait_chain_multiple_invocations(self):
sleep_intervals = []
r = Retrying(
sleep=sleep_intervals.append,
wait=tenacity.wait_chain(*[tenacity.wait_fixed(i + 1) for i in range(3)]),
stop=tenacity.stop_after_attempt(5),
retry=tenacity.retry_if_result(lambda x: x == 1),
)
@r.wraps
def always_return_1():
return 1
self.assertRaises(tenacity.RetryError, always_return_1)
self.assertEqual(sleep_intervals, [1.0, 2.0, 3.0, 3.0])
sleep_intervals[:] = []
# Clear and restart retrying.
self.assertRaises(tenacity.RetryError, always_return_1)
self.assertEqual(sleep_intervals, [1.0, 2.0, 3.0, 3.0])
sleep_intervals[:] = []
def test_wait_random_exponential(self):
fn = tenacity.wait_random_exponential(0.5, 60.0)
for _ in range(1000):
self._assert_inclusive_range(fn(make_retry_state(1, 0)), 0, 0.5)
self._assert_inclusive_range(fn(make_retry_state(2, 0)), 0, 1.0)
self._assert_inclusive_range(fn(make_retry_state(3, 0)), 0, 2.0)
self._assert_inclusive_range(fn(make_retry_state(4, 0)), 0, 4.0)
self._assert_inclusive_range(fn(make_retry_state(5, 0)), 0, 8.0)
self._assert_inclusive_range(fn(make_retry_state(6, 0)), 0, 16.0)
self._assert_inclusive_range(fn(make_retry_state(7, 0)), 0, 32.0)
self._assert_inclusive_range(fn(make_retry_state(8, 0)), 0, 60.0)
self._assert_inclusive_range(fn(make_retry_state(9, 0)), 0, 60.0)
fn = tenacity.wait_random_exponential(10, 5)
for _ in range(1000):
self._assert_inclusive_range(fn(make_retry_state(1, 0)), 0.00, 5.00)
# Default arguments exist
fn = tenacity.wait_random_exponential()
fn(make_retry_state(0, 0))
def test_wait_random_exponential_statistically(self):
fn = tenacity.wait_random_exponential(0.5, 60.0)
attempt = []
for i in range(10):
attempt.append([fn(make_retry_state(i, 0)) for _ in range(4000)])
def mean(lst):
return float(sum(lst)) / float(len(lst))
# skipping attempt 0
self._assert_inclusive_epsilon(mean(attempt[1]), 0.25, 0.02)
self._assert_inclusive_epsilon(mean(attempt[2]), 0.50, 0.04)
self._assert_inclusive_epsilon(mean(attempt[3]), 1, 0.08)
self._assert_inclusive_epsilon(mean(attempt[4]), 2, 0.16)
self._assert_inclusive_epsilon(mean(attempt[5]), 4, 0.32)
self._assert_inclusive_epsilon(mean(attempt[6]), 8, 0.64)
self._assert_inclusive_epsilon(mean(attempt[7]), 16, 1.28)
self._assert_inclusive_epsilon(mean(attempt[8]), 30, 2.56)
self._assert_inclusive_epsilon(mean(attempt[9]), 30, 2.56)
def test_wait_retry_state_attributes(self):
class ExtractCallState(Exception):
pass
# retry_state is mutable, so return it as an exception to extract the
# exact values it has when wait is called and bypass any other logic.
def waitfunc(retry_state):
raise ExtractCallState(retry_state)
retrying = Retrying(
wait=waitfunc,
retry=(tenacity.retry_if_exception_type() | tenacity.retry_if_result(lambda result: result == 123)),
)
def returnval():
return 123
try:
retrying(returnval)
except ExtractCallState as err:
retry_state = err.args[0]
self.assertIs(retry_state.fn, returnval)
self.assertEqual(retry_state.args, ())
self.assertEqual(retry_state.kwargs, {})
self.assertEqual(retry_state.outcome.result(), 123)
self.assertEqual(retry_state.attempt_number, 1)
self.assertGreaterEqual(retry_state.outcome_timestamp, retry_state.start_time)
def dying():
raise Exception("Broken")
try:
retrying(dying)
except ExtractCallState as err:
retry_state = err.args[0]
self.assertIs(retry_state.fn, dying)
self.assertEqual(retry_state.args, ())
self.assertEqual(retry_state.kwargs, {})
self.assertEqual(str(retry_state.outcome.exception()), "Broken")
self.assertEqual(retry_state.attempt_number, 1)
self.assertGreaterEqual(retry_state.outcome_timestamp, retry_state.start_time)
class TestRetryConditions(unittest.TestCase):
def test_retry_if_result(self):
retry = tenacity.retry_if_result(lambda x: x == 1)
def r(fut):
retry_state = make_retry_state(1, 1.0, last_result=fut)
return retry(retry_state)
self.assertTrue(r(tenacity.Future.construct(1, 1, False)))
self.assertFalse(r(tenacity.Future.construct(1, 2, False)))
def test_retry_if_not_result(self):
retry = tenacity.retry_if_not_result(lambda x: x == 1)
def r(fut):
retry_state = make_retry_state(1, 1.0, last_result=fut)
return retry(retry_state)
self.assertTrue(r(tenacity.Future.construct(1, 2, False)))
self.assertFalse(r(tenacity.Future.construct(1, 1, False)))
def test_retry_any(self):
retry = tenacity.retry_any(
tenacity.retry_if_result(lambda x: x == 1),
tenacity.retry_if_result(lambda x: x == 2),
)
def r(fut):
retry_state = make_retry_state(1, 1.0, last_result=fut)
return retry(retry_state)
self.assertTrue(r(tenacity.Future.construct(1, 1, False)))
self.assertTrue(r(tenacity.Future.construct(1, 2, False)))
self.assertFalse(r(tenacity.Future.construct(1, 3, False)))
self.assertFalse(r(tenacity.Future.construct(1, 1, True)))
def test_retry_all(self):
retry = tenacity.retry_all(
tenacity.retry_if_result(lambda x: x == 1),
tenacity.retry_if_result(lambda x: isinstance(x, int)),
)
def r(fut):
retry_state = make_retry_state(1, 1.0, last_result=fut)
return retry(retry_state)
self.assertTrue(r(tenacity.Future.construct(1, 1, False)))
self.assertFalse(r(tenacity.Future.construct(1, 2, False)))
self.assertFalse(r(tenacity.Future.construct(1, 3, False)))
self.assertFalse(r(tenacity.Future.construct(1, 1, True)))
def test_retry_and(self):
retry = tenacity.retry_if_result(lambda x: x == 1) & tenacity.retry_if_result(lambda x: isinstance(x, int))
def r(fut):
retry_state = make_retry_state(1, 1.0, last_result=fut)
return retry(retry_state)
self.assertTrue(r(tenacity.Future.construct(1, 1, False)))
self.assertFalse(r(tenacity.Future.construct(1, 2, False)))
self.assertFalse(r(tenacity.Future.construct(1, 3, False)))
self.assertFalse(r(tenacity.Future.construct(1, 1, True)))
def test_retry_or(self):
retry = tenacity.retry_if_result(lambda x: x == "foo") | tenacity.retry_if_result(lambda x: isinstance(x, int))
def r(fut):
retry_state = make_retry_state(1, 1.0, last_result=fut)
return retry(retry_state)
self.assertTrue(r(tenacity.Future.construct(1, "foo", False)))
self.assertFalse(r(tenacity.Future.construct(1, "foobar", False)))
self.assertFalse(r(tenacity.Future.construct(1, 2.2, False)))
self.assertFalse(r(tenacity.Future.construct(1, 42, True)))
def _raise_try_again(self):
self._attempts += 1
if self._attempts < 3:
raise tenacity.TryAgain
def test_retry_try_again(self):
self._attempts = 0
Retrying(stop=tenacity.stop_after_attempt(5), retry=tenacity.retry_never)(self._raise_try_again)
self.assertEqual(3, self._attempts)
def test_retry_try_again_forever(self):
def _r():
raise tenacity.TryAgain
r = Retrying(stop=tenacity.stop_after_attempt(5), retry=tenacity.retry_never)
self.assertRaises(tenacity.RetryError, r, _r)
self.assertEqual(5, r.statistics["attempt_number"])
def test_retry_try_again_forever_reraise(self):
def _r():
raise tenacity.TryAgain
r = Retrying(
stop=tenacity.stop_after_attempt(5),
retry=tenacity.retry_never,
reraise=True,
)
self.assertRaises(tenacity.TryAgain, r, _r)
self.assertEqual(5, r.statistics["attempt_number"])
def test_retry_if_exception_message_negative_no_inputs(self):
with self.assertRaises(TypeError):
tenacity.retry_if_exception_message()
def test_retry_if_exception_message_negative_too_many_inputs(self):
with self.assertRaises(TypeError):
tenacity.retry_if_exception_message(message="negative", match="negative")
class NoneReturnUntilAfterCount:
"""Holds counter state for invoking a method several times in a row."""
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""Return None until after count threshold has been crossed.
Then return True.
"""
if self.counter < self.count:
self.counter += 1
return None
return True
class NoIOErrorAfterCount:
"""Holds counter state for invoking a method several times in a row."""
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""Raise an IOError until after count threshold has been crossed.
Then return True.
"""
if self.counter < self.count:
self.counter += 1
raise IOError("Hi there, I'm an IOError")
return True
class NoNameErrorAfterCount:
"""Holds counter state for invoking a method several times in a row."""
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""Raise a NameError until after count threshold has been crossed.
Then return True.
"""
if self.counter < self.count:
self.counter += 1
raise NameError("Hi there, I'm a NameError")
return True
class NameErrorUntilCount:
"""Holds counter state for invoking a method several times in a row."""
derived_message = "Hi there, I'm a NameError"
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""Return True until after count threshold has been crossed.
Then raise a NameError.
"""
if self.counter < self.count:
self.counter += 1
return True
raise NameError(self.derived_message)
class IOErrorUntilCount:
"""Holds counter state for invoking a method several times in a row."""
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""Return True until after count threshold has been crossed.
Then raise an IOError.
"""
if self.counter < self.count:
self.counter += 1
return True
raise IOError("Hi there, I'm an IOError")
class CustomError(Exception):
"""This is a custom exception class.
Note that For Python 2.x, we don't strictly need to extend BaseException,
however, Python 3.x will complain. While this test suite won't run
correctly under Python 3.x without extending from the Python exception
hierarchy, the actual module code is backwards compatible Python 2.x and
will allow for cases where exception classes don't extend from the
hierarchy.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class NoCustomErrorAfterCount:
"""Holds counter state for invoking a method several times in a row."""
derived_message = "This is a Custom exception class"
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""Raise a CustomError until after count threshold has been crossed.
Then return True.
"""
if self.counter < self.count:
self.counter += 1
raise CustomError(self.derived_message)
return True
class CapturingHandler(logging.Handler):
"""Captures log records for inspection."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.records = []
def emit(self, record):
self.records.append(record)
def current_time_ms():
return int(round(time.time() * 1000))
@retry(
wait=tenacity.wait_fixed(0.05),
retry=tenacity.retry_if_result(lambda result: result is None),
)
def _retryable_test_with_wait(thing):
return thing.go()
@retry(
stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_if_result(lambda result: result is None),
)
def _retryable_test_with_stop(thing):
return thing.go()
@retry(retry=tenacity.retry_if_exception_type(IOError))
def _retryable_test_with_exception_type_io(thing):
return thing.go()
@retry(retry=tenacity.retry_if_not_exception_type(IOError))
def _retryable_test_if_not_exception_type_io(thing):
return thing.go()
@retry(stop=tenacity.stop_after_attempt(3), retry=tenacity.retry_if_exception_type(IOError))
def _retryable_test_with_exception_type_io_attempt_limit(thing):
return thing.go()
@retry(retry=tenacity.retry_unless_exception_type(NameError))
def _retryable_test_with_unless_exception_type_name(thing):
return thing.go()
@retry(
stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_unless_exception_type(NameError),
)
def _retryable_test_with_unless_exception_type_name_attempt_limit(thing):
return thing.go()
@retry(retry=tenacity.retry_unless_exception_type())
def _retryable_test_with_unless_exception_type_no_input(thing):
return thing.go()
@retry(
stop=tenacity.stop_after_attempt(5),
retry=tenacity.retry_if_exception_message(message=NoCustomErrorAfterCount.derived_message),
)
def _retryable_test_if_exception_message_message(thing):
return thing.go()
@retry(retry=tenacity.retry_if_not_exception_message(message=NoCustomErrorAfterCount.derived_message))
def _retryable_test_if_not_exception_message_message(thing):
return thing.go()
@retry(retry=tenacity.retry_if_exception_message(match=NoCustomErrorAfterCount.derived_message[:3] + ".*"))
def _retryable_test_if_exception_message_match(thing):
return thing.go()
@retry(retry=tenacity.retry_if_not_exception_message(match=NoCustomErrorAfterCount.derived_message[:3] + ".*"))
def _retryable_test_if_not_exception_message_match(thing):
return thing.go()
@retry(retry=tenacity.retry_if_not_exception_message(message=NameErrorUntilCount.derived_message))
def _retryable_test_not_exception_message_delay(thing):
return thing.go()
@retry
def _retryable_default(thing):
return thing.go()
@retry()
def _retryable_default_f(thing):
return thing.go()
@retry(retry=tenacity.retry_if_exception_type(CustomError))
def _retryable_test_with_exception_type_custom(thing):
return thing.go()
@retry(
stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_if_exception_type(CustomError),
)
def _retryable_test_with_exception_type_custom_attempt_limit(thing):
return thing.go()
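# A minimal usage sketch tying together the pieces exercised below: the retry
# decorator with an explicit stop and wait, applied to one of the counting
# helpers defined above. _example_flaky_call and _example_retry_usage are
# illustrative only and are not collected as tests.
@retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_fixed(0.01))
def _example_flaky_call(thing):
    return thing.go()

def _example_retry_usage():
    # NoIOErrorAfterCount(2) raises IOError twice, then returns True; three
    # attempts are enough for the call to succeed.
    assert _example_flaky_call(NoIOErrorAfterCount(2)) is True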
class TestDecoratorWrapper(unittest.TestCase):
def test_with_wait(self):
start = current_time_ms()
result = _retryable_test_with_wait(NoneReturnUntilAfterCount(5))
t = current_time_ms() - start
self.assertGreaterEqual(t, 250)
self.assertTrue(result)
def test_with_stop_on_return_value(self):
try:
_retryable_test_with_stop(NoneReturnUntilAfterCount(5))
self.fail("Expected RetryError after 3 attempts")
except RetryError as re:
self.assertFalse(re.last_attempt.failed)
self.assertEqual(3, re.last_attempt.attempt_number)
self.assertTrue(re.last_attempt.result() is None)
print(re)
def test_with_stop_on_exception(self):
try:
_retryable_test_with_stop(NoIOErrorAfterCount(5))
self.fail("Expected IOError")
except IOError as re:
self.assertTrue(isinstance(re, IOError))
print(re)
def test_retry_if_exception_of_type(self):
self.assertTrue(_retryable_test_with_exception_type_io(NoIOErrorAfterCount(5)))
try:
_retryable_test_with_exception_type_io(NoNameErrorAfterCount(5))
self.fail("Expected NameError")
except NameError as n:
self.assertTrue(isinstance(n, NameError))
print(n)
self.assertTrue(_retryable_test_with_exception_type_custom(NoCustomErrorAfterCount(5)))
try:
_retryable_test_with_exception_type_custom(NoNameErrorAfterCount(5))
self.fail("Expected NameError")
except NameError as n:
self.assertTrue(isinstance(n, NameError))
print(n)
def test_retry_except_exception_of_type(self):
self.assertTrue(_retryable_test_if_not_exception_type_io(NoNameErrorAfterCount(5)))
try:
_retryable_test_if_not_exception_type_io(NoIOErrorAfterCount(5))
self.fail("Expected IOError")
except IOError as err:
self.assertTrue(isinstance(err, IOError))
print(err)
def test_retry_until_exception_of_type_attempt_number(self):
try:
self.assertTrue(_retryable_test_with_unless_exception_type_name(NameErrorUntilCount(5)))
except NameError as e:
s = _retryable_test_with_unless_exception_type_name.retry.statistics
self.assertTrue(s["attempt_number"] == 6)
print(e)
else:
self.fail("Expected NameError")
def test_retry_until_exception_of_type_no_type(self):
try:
# no input should catch all subclasses of Exception
self.assertTrue(_retryable_test_with_unless_exception_type_no_input(NameErrorUntilCount(5)))
except NameError as e:
s = _retryable_test_with_unless_exception_type_no_input.retry.statistics
self.assertTrue(s["attempt_number"] == 6)
print(e)
else:
self.fail("Expected NameError")
def test_retry_until_exception_of_type_wrong_exception(self):
try:
# two iterations with IOError, one that returns True
_retryable_test_with_unless_exception_type_name_attempt_limit(IOErrorUntilCount(2))
self.fail("Expected RetryError")
except RetryError as e:
self.assertTrue(isinstance(e, RetryError))
print(e)
def test_retry_if_exception_message(self):
try:
self.assertTrue(_retryable_test_if_exception_message_message(NoCustomErrorAfterCount(3)))
except CustomError:
print(_retryable_test_if_exception_message_message.retry.statistics)
self.fail("CustomError should've been retried from errormessage")
def test_retry_if_not_exception_message(self):
try:
self.assertTrue(_retryable_test_if_not_exception_message_message(NoCustomErrorAfterCount(2)))
except CustomError:
s = _retryable_test_if_not_exception_message_message.retry.statistics
self.assertTrue(s["attempt_number"] == 1)
def test_retry_if_not_exception_message_delay(self):
try:
self.assertTrue(_retryable_test_not_exception_message_delay(NameErrorUntilCount(3)))
except NameError:
s = _retryable_test_not_exception_message_delay.retry.statistics
print(s["attempt_number"])
self.assertTrue(s["attempt_number"] == 4)
def test_retry_if_exception_message_match(self):
try:
self.assertTrue(_retryable_test_if_exception_message_match(NoCustomErrorAfterCount(3)))
except CustomError:
self.fail("CustomError should've been retried from errormessage")
def test_retry_if_not_exception_message_match(self):
try:
self.assertTrue(_retryable_test_if_not_exception_message_message(NoCustomErrorAfterCount(2)))
except CustomError:
s = _retryable_test_if_not_exception_message_message.retry.statistics
self.assertTrue(s["attempt_number"] == 1)
def test_defaults(self):
self.assertTrue(_retryable_default(NoNameErrorAfterCount(5)))
self.assertTrue(_retryable_default_f(NoNameErrorAfterCount(5)))
self.assertTrue(_retryable_default(NoCustomErrorAfterCount(5)))
self.assertTrue(_retryable_default_f(NoCustomErrorAfterCount(5)))
def test_retry_function_object(self):
"""Test that funсtools.wraps doesn't cause problems with callable objects.
It raises an error upon trying to wrap it in Py2, because __name__
attribute is missing. It's fixed in Py3 but was never backported.
"""
class Hello:
def __call__(self):
return "Hello"
retrying = Retrying(wait=tenacity.wait_fixed(0.01), stop=tenacity.stop_after_attempt(3))
h = retrying.wraps(Hello())
self.assertEqual(h(), "Hello")
class TestRetryWith:
def test_redefine_wait(self):
start = current_time_ms()
result = _retryable_test_with_wait.retry_with(wait=tenacity.wait_fixed(0.1))(NoneReturnUntilAfterCount(5))
t = current_time_ms() - start
assert t >= 500
assert result is True
def test_redefine_stop(self):
result = _retryable_test_with_stop.retry_with(stop=tenacity.stop_after_attempt(5))(NoneReturnUntilAfterCount(4))
assert result is True
def test_retry_error_cls_should_be_preserved(self):
@retry(stop=tenacity.stop_after_attempt(10), retry_error_cls=ValueError)
def _retryable():
raise Exception("raised for test purposes")
with pytest.raises(Exception) as exc_ctx:
_retryable.retry_with(stop=tenacity.stop_after_attempt(2))()
assert exc_ctx.type is ValueError, "Should remap to specific exception type"
def test_retry_error_callback_should_be_preserved(self):
def return_text(retry_state):
return "Calling %s keeps raising errors after %s attempts" % (
retry_state.fn.__name__,
retry_state.attempt_number,
)
@retry(stop=tenacity.stop_after_attempt(10), retry_error_callback=return_text)
def _retryable():
raise Exception("raised for test purposes")
result = _retryable.retry_with(stop=tenacity.stop_after_attempt(5))()
assert result == "Calling _retryable keeps raising errors after 5 attempts"
class TestBeforeAfterAttempts(unittest.TestCase):
_attempt_number = 0
def test_before_attempts(self):
TestBeforeAfterAttempts._attempt_number = 0
def _before(retry_state):
TestBeforeAfterAttempts._attempt_number = retry_state.attempt_number
@retry(
wait=tenacity.wait_fixed(1),
stop=tenacity.stop_after_attempt(1),
before=_before,
)
def _test_before():
pass
_test_before()
self.assertTrue(TestBeforeAfterAttempts._attempt_number == 1)
def test_after_attempts(self):
TestBeforeAfterAttempts._attempt_number = 0
def _after(retry_state):
TestBeforeAfterAttempts._attempt_number = retry_state.attempt_number
@retry(
wait=tenacity.wait_fixed(0.1),
stop=tenacity.stop_after_attempt(3),
after=_after,
)
def _test_after():
if TestBeforeAfterAttempts._attempt_number < 2:
raise Exception("testing after_attempts handler")
else:
pass
_test_after()
self.assertTrue(TestBeforeAfterAttempts._attempt_number == 2)
def test_before_sleep(self):
def _before_sleep(retry_state):
self.assertGreater(retry_state.next_action.sleep, 0)
_before_sleep.attempt_number = retry_state.attempt_number
@retry(
wait=tenacity.wait_fixed(0.01),
stop=tenacity.stop_after_attempt(3),
before_sleep=_before_sleep,
)
def _test_before_sleep():
if _before_sleep.attempt_number < 2:
raise Exception("testing before_sleep_attempts handler")
_test_before_sleep()
self.assertEqual(_before_sleep.attempt_number, 2)
def _before_sleep_log_raises(self, get_call_fn):
thing = NoIOErrorAfterCount(2)
logger = logging.getLogger(self.id())
logger.propagate = False
logger.setLevel(logging.INFO)
handler = CapturingHandler()
logger.addHandler(handler)
try:
_before_sleep = tenacity.before_sleep_log(logger, logging.INFO)
retrying = Retrying(
wait=tenacity.wait_fixed(0.01),
stop=tenacity.stop_after_attempt(3),
before_sleep=_before_sleep,
)
get_call_fn(retrying)(thing.go)
finally:
logger.removeHandler(handler)
etalon_re = r"^Retrying .* in 0\.01 seconds as it raised " r"(IO|OS)Error: Hi there, I'm an IOError\.$"
self.assertEqual(len(handler.records), 2)
fmt = logging.Formatter().format
self.assertRegex(fmt(handler.records[0]), etalon_re)
self.assertRegex(fmt(handler.records[1]), etalon_re)
def test_before_sleep_log_raises(self):
self._before_sleep_log_raises(lambda x: x)
def test_before_sleep_log_raises_with_exc_info(self):
thing = NoIOErrorAfterCount(2)
logger = logging.getLogger(self.id())
logger.propagate = False
logger.setLevel(logging.INFO)
handler = CapturingHandler()
logger.addHandler(handler)
try:
_before_sleep = tenacity.before_sleep_log(logger, logging.INFO, exc_info=True)
retrying = Retrying(
wait=tenacity.wait_fixed(0.01),
stop=tenacity.stop_after_attempt(3),
before_sleep=_before_sleep,
)
retrying(thing.go)
finally:
logger.removeHandler(handler)
etalon_re = re.compile(
r"^Retrying .* in 0\.01 seconds as it raised "
r"(IO|OS)Error: Hi there, I'm an IOError\.{0}"
r"Traceback \(most recent call last\):{0}"
r".*$".format("\n"),
flags=re.MULTILINE,
)
self.assertEqual(len(handler.records), 2)
fmt = logging.Formatter().format
self.assertRegex(fmt(handler.records[0]), etalon_re)
self.assertRegex(fmt(handler.records[1]), etalon_re)
def test_before_sleep_log_returns(self, exc_info=False):
thing = NoneReturnUntilAfterCount(2)
logger = logging.getLogger(self.id())
logger.propagate = False
logger.setLevel(logging.INFO)
handler = CapturingHandler()
logger.addHandler(handler)
try:
_before_sleep = tenacity.before_sleep_log(logger, logging.INFO, exc_info=exc_info)
_retry = tenacity.retry_if_result(lambda result: result is None)
retrying = Retrying(
wait=tenacity.wait_fixed(0.01),
stop=tenacity.stop_after_attempt(3),
retry=_retry,
before_sleep=_before_sleep,
)
retrying(thing.go)
finally:
logger.removeHandler(handler)
etalon_re = r"^Retrying .* in 0\.01 seconds as it returned None\.$"
self.assertEqual(len(handler.records), 2)
fmt = logging.Formatter().format
self.assertRegex(fmt(handler.records[0]), etalon_re)
self.assertRegex(fmt(handler.records[1]), etalon_re)
def test_before_sleep_log_returns_with_exc_info(self):
self.test_before_sleep_log_returns(exc_info=True)
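# A minimal sketch (not part of the original suite) of wiring the logging
# hook tested above. The logger name and policy values are hypothetical; the
# helper is never invoked here and only shows how before_sleep_log is
# normally attached outside of a test harness.
def _example_before_sleep_logging():
    logger = logging.getLogger("example.retries")
    @retry(
        wait=tenacity.wait_fixed(0.01),
        stop=tenacity.stop_after_attempt(3),
        before_sleep=tenacity.before_sleep_log(logger, logging.INFO),
    )
    def _fetch():
        raise IOError("transient failure")
    with pytest.raises(RetryError):
        _fetch()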
class TestReraiseExceptions(unittest.TestCase):
def test_reraise_by_default(self):
calls = []
@retry(
wait=tenacity.wait_fixed(0.1),
stop=tenacity.stop_after_attempt(2),
reraise=True,
)
def _reraised_by_default():
calls.append("x")
raise KeyError("Bad key")
self.assertRaises(KeyError, _reraised_by_default)
self.assertEqual(2, len(calls))
def test_reraise_from_retry_error(self):
calls = []
@retry(wait=tenacity.wait_fixed(0.1), stop=tenacity.stop_after_attempt(2))
def _raise_key_error():
calls.append("x")
raise KeyError("Bad key")
def _reraised_key_error():
try:
_raise_key_error()
except tenacity.RetryError as retry_err:
retry_err.reraise()
self.assertRaises(KeyError, _reraised_key_error)
self.assertEqual(2, len(calls))
def test_reraise_timeout_from_retry_error(self):
calls = []
@retry(
wait=tenacity.wait_fixed(0.1),
stop=tenacity.stop_after_attempt(2),
retry=lambda retry_state: True,
)
def _mock_fn():
calls.append("x")
def _reraised_mock_fn():
try:
_mock_fn()
except tenacity.RetryError as retry_err:
retry_err.reraise()
self.assertRaises(tenacity.RetryError, _reraised_mock_fn)
self.assertEqual(2, len(calls))
def test_reraise_no_exception(self):
calls = []
@retry(
wait=tenacity.wait_fixed(0.1),
stop=tenacity.stop_after_attempt(2),
retry=lambda retry_state: True,
reraise=True,
)
def _mock_fn():
calls.append("x")
self.assertRaises(tenacity.RetryError, _mock_fn)
self.assertEqual(2, len(calls))
class TestStatistics(unittest.TestCase):
def test_stats(self):
@retry()
def _foobar():
return 42
self.assertEqual({}, _foobar.retry.statistics)
_foobar()
self.assertEqual(1, _foobar.retry.statistics["attempt_number"])
def test_stats_failing(self):
@retry(stop=tenacity.stop_after_attempt(2))
def _foobar():
raise ValueError(42)
self.assertEqual({}, _foobar.retry.statistics)
try:
_foobar()
except Exception: # noqa: B902
pass
self.assertEqual(2, _foobar.retry.statistics["attempt_number"])
class TestRetryErrorCallback(unittest.TestCase):
def setUp(self):
self._attempt_number = 0
self._callback_called = False
def _callback(self, fut):
self._callback_called = True
return fut
def test_retry_error_callback(self):
num_attempts = 3
def retry_error_callback(retry_state):
retry_error_callback.called_times += 1
return retry_state.outcome
retry_error_callback.called_times = 0
@retry(
stop=tenacity.stop_after_attempt(num_attempts),
retry_error_callback=retry_error_callback,
)
def _foobar():
self._attempt_number += 1
raise Exception("This exception should not be raised")
result = _foobar()
self.assertEqual(retry_error_callback.called_times, 1)
self.assertEqual(num_attempts, self._attempt_number)
self.assertIsInstance(result, tenacity.Future)
class TestContextManager(unittest.TestCase):
def test_context_manager_retry_one(self):
from tenacity import Retrying
raise_ = True
for attempt in Retrying():
with attempt:
if raise_:
raise_ = False
raise Exception("Retry it!")
def test_context_manager_on_error(self):
from tenacity import Retrying
class CustomError(Exception):
pass
retry = Retrying(retry=tenacity.retry_if_exception_type(IOError))
def test():
for attempt in retry:
with attempt:
raise CustomError("Don't retry!")
self.assertRaises(CustomError, test)
def test_context_manager_retry_error(self):
from tenacity import Retrying
retry = Retrying(stop=tenacity.stop_after_attempt(2))
def test():
for attempt in retry:
with attempt:
raise Exception("Retry it!")
self.assertRaises(RetryError, test)
def test_context_manager_reraise(self):
from tenacity import Retrying
class CustomError(Exception):
pass
retry = Retrying(reraise=True, stop=tenacity.stop_after_attempt(2))
def test():
for attempt in retry:
with attempt:
raise CustomError("Don't retry!")
self.assertRaises(CustomError, test)
class TestInvokeAsCallable:
"""Test direct invocation of Retrying as a callable."""
@staticmethod
def invoke(retry, f):
"""
Invoke Retrying logic.
Wrapper allows testing different call mechanisms in test sub-classes.
"""
return retry(f)
def test_retry_one(self):
def f():
f.calls.append(len(f.calls) + 1)
if len(f.calls) <= 1:
raise Exception("Retry it!")
return 42
f.calls = []
retry = Retrying()
assert self.invoke(retry, f) == 42
assert f.calls == [1, 2]
def test_on_error(self):
class CustomError(Exception):
pass
def f():
f.calls.append(len(f.calls) + 1)
if len(f.calls) <= 1:
raise CustomError("Don't retry!")
return 42
f.calls = []
retry = Retrying(retry=tenacity.retry_if_exception_type(IOError))
with pytest.raises(CustomError):
self.invoke(retry, f)
assert f.calls == [1]
def test_retry_error(self):
def f():
f.calls.append(len(f.calls) + 1)
raise Exception("Retry it!")
f.calls = []
retry = Retrying(stop=tenacity.stop_after_attempt(2))
with pytest.raises(RetryError):
self.invoke(retry, f)
assert f.calls == [1, 2]
def test_reraise(self):
class CustomError(Exception):
pass
def f():
f.calls.append(len(f.calls) + 1)
raise CustomError("Retry it!")
f.calls = []
retry = Retrying(reraise=True, stop=tenacity.stop_after_attempt(2))
with pytest.raises(CustomError):
self.invoke(retry, f)
assert f.calls == [1, 2]
class TestRetryException(unittest.TestCase):
def test_retry_error_is_pickleable(self):
import pickle
expected = RetryError(last_attempt=123)
pickled = pickle.dumps(expected)
actual = pickle.loads(pickled)
self.assertEqual(expected.last_attempt, actual.last_attempt)
class TestRetryTyping(unittest.TestCase):
@pytest.mark.skipif(sys.version_info < (3, 0), reason="typeguard not supported for python 2")
def test_retry_type_annotations(self):
"""The decorator should maintain types of decorated functions."""
# Just in case this is run with unit-test, return early for py2
if sys.version_info < (3, 0):
return
# Function-level import because we can't install this for python 2.
from typeguard import check_type
def num_to_str(number):
# type: (int) -> str
return str(number)
# equivalent to a raw @retry decoration
with_raw = retry(num_to_str)
with_raw_result = with_raw(1)
# equivalent to a @retry(...) decoration
with_constructor = retry()(num_to_str)
    with_constructor_result = with_constructor(1)
# These raise TypeError exceptions if they fail
check_type("with_raw", with_raw, typing.Callable[[int], str])
check_type("with_raw_result", with_raw_result, str)
check_type("with_constructor", with_constructor, typing.Callable[[int], str])
check_type("with_constructor_result", with_constructor_result, str)
@contextmanager
def reports_deprecation_warning():
__tracebackhide__ = True
oldfilters = copy(warnings.filters)
warnings.simplefilter("always")
try:
with pytest.warns(DeprecationWarning):
yield
finally:
warnings.filters = oldfilters
class TestMockingSleep:
RETRY_ARGS = dict(
wait=tenacity.wait_fixed(0.1),
stop=tenacity.stop_after_attempt(5),
)
def _fail(self):
raise NotImplementedError()
@retry(**RETRY_ARGS)
def _decorated_fail(self):
self._fail()
@pytest.fixture()
def mock_sleep(self, monkeypatch):
class MockSleep:
call_count = 0
def __call__(self, seconds):
self.call_count += 1
sleep = MockSleep()
monkeypatch.setattr(tenacity.nap.time, "sleep", sleep)
yield sleep
def test_decorated(self, mock_sleep):
with pytest.raises(RetryError):
self._decorated_fail()
assert mock_sleep.call_count == 4
def test_decorated_retry_with(self, mock_sleep):
fail_faster = self._decorated_fail.retry_with(
stop=tenacity.stop_after_attempt(2),
)
with pytest.raises(RetryError):
fail_faster()
assert mock_sleep.call_count == 1
if __name__ == "__main__":
unittest.main()
| 34.580387 | 120 | 0.648121 |
58feafe71d00210353b53cbd086de2a6339f9331 | 425 | py | Python | web/accounts/urls.py | samuelfirst/nemoobot | b74ad66d4f2052eaba14e4b79e20c3da274b5909 | ["MIT"] | 1 | 2021-01-30T09:19:37.000Z | 2021-01-30T09:19:37.000Z | web/accounts/urls.py | samuelfirst/nemoobot | b74ad66d4f2052eaba14e4b79e20c3da274b5909 | ["MIT"] | 2 | 2020-12-21T20:57:19.000Z | 2021-01-26T08:08:09.000Z | web/accounts/urls.py | samuelfirst/nemoobot | b74ad66d4f2052eaba14e4b79e20c3da274b5909 | ["MIT"] | 1 | 2020-12-22T07:42:42.000Z | 2020-12-22T07:42:42.000Z |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('signup/', views.signup, name='signup'),
path(
'connect_to_twitch/',
views.connect_to_twicth,
name='connect_to_twitch'
),
path('profile/', views.profile, name='profile'),
path('settings/', views.settings, name='settings'),
path('logs/', views.logs, name='logs'),
]
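# A small usage sketch (not part of the original file): because every route
# above is registered with a name, other code can refer to it via reverse()
# instead of hard-coding paths. The exact prefix of the returned URL depends
# on how this module is included in the project's ROOT_URLCONF, so the helper
# below is illustrative only and is never called here.
def _example_reverse_lookup():
    from django.urls import reverse
    return {
        'index': reverse('index'),
        'profile': reverse('profile'),
        'settings': reverse('settings'),
    }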
| 26.5625 | 55 | 0.621176 |
0808352ccd16110d1969f1288992d0198bf5ff7b | 7,803 | py | Python | www/sshop/views/Shop.py | Tiaonmmn/CISCN_2019 | beb4a97320c9d66d6aa56831b2752a6af3fec141 | ["Unlicense"] | null | null | null | www/sshop/views/Shop.py | Tiaonmmn/CISCN_2019 | beb4a97320c9d66d6aa56831b2752a6af3fec141 | ["Unlicense"] | null | null | null | www/sshop/views/Shop.py | Tiaonmmn/CISCN_2019 | beb4a97320c9d66d6aa56831b2752a6af3fec141 | ["Unlicense"] | 1 | 2020-07-17T14:17:50.000Z | 2020-07-17T14:17:50.000Z |
# -*- coding:utf-8 -*-
import tornado.web
from sqlalchemy.orm.exc import NoResultFound
import urllib
import unicodedata
from sshop.base import BaseHandler
from sshop.models import Commodity, User, Shopcar, BanIP
from sshop.settings import limit
import traceback
import random
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class ShopIndexHandler(BaseHandler):
def get(self, *args, **kwargs):
try:
ip = self.orm.query(BanIP).filter(BanIP.ip == self.request.remote_ip).one()
ip.count += 1
self.orm.commit()
except NoResultFound:
self.orm.add(BanIP(ip=self.request.remote_ip, count=1))
self.orm.commit()
try:
ip = self.orm.query(BanIP).filter(BanIP.ip == self.request.remote_ip).one()
if ip.count >= 50000:
return self.finish("Get out here.F**k you!")
except:
pass
return self.redirect('/shop')
class ShopListHandler(BaseHandler):
def get(self):
try:
ip = self.orm.query(BanIP).filter(BanIP.ip == self.request.remote_ip).one()
ip.count += 1
self.orm.commit()
except NoResultFound:
self.orm.add(BanIP(ip=self.request.remote_ip, count=1))
self.orm.commit()
try:
ip = self.orm.query(BanIP).filter(BanIP.ip == self.request.remote_ip).one()
if ip.count >= 50000:
return self.finish("Get out here.F**k you!")
except:
pass
page = self.get_argument('page', 1)
page = int(page) if int(page) else 1
commoditys = self.orm.query(Commodity) \
.filter(Commodity.amount > 0) \
.order_by(Commodity.price.desc()) \
.limit(limit).offset((page - 1) * limit).all()
return self.render('index.html', commoditys=commoditys, preview=page - 1, next=page + 1, limit=limit)
class ShopDetailHandler(BaseHandler):
@tornado.web.authenticated
def get(self, id=1):
try:
commodity = self.orm.query(Commodity) \
.filter(Commodity.id == int(id)).one()
except NoResultFound:
return self.redirect('/')
return self.render('info.html', commodity=commodity)
class ShopPayHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
return self.finish("F**k you!")
def post(self):
try:
price = self.get_argument('price')
user = self.orm.query(User).filter(User.username == self.current_user).one()
if user.integral < float(price):
return self.render('pay.html', danger=1)
user.integral = user.pay(float(price))
self.orm.commit()
shopcar = Shopcar()
shopcar.name = self.orm.query(Commodity).filter(Commodity.price == price).one()
shopcar.amount = self.orm.query(Shopcar).filter(Shopcar.name == shopcar.name.name).first()
shopcar.amount.amount += 1
self.orm.commit()
if shopcar.name.name == 'flag':
return self.render('pay.html', success=1, flag=True)
else:
return self.render('pay.html', success=1)
except:
traceback.print_exc()
return self.render('pay.html', danger=1)
class ShopCarHandler(BaseHandler):
@tornado.web.authenticated
def get(self, *args, **kwargs):
return self.render('shopcar.html',danger=1,dangermessage="It's deprecated.No use.")
@tornado.web.authenticated
def post(self, *args, **kwargs):
return self.render('shopcar.html',danger=1,dangermessage="It's deprecated.No use.")
class ShopCarAddHandler(BaseHandler):
def post(self, *args, **kwargs):
id = self.get_argument('id')
self.set_secure_cookie('commodity_id', id)
return self.redirect('/shopcar')
class ChargeHandler(BaseHandler):
@tornado.web.authenticated
def get(self, *args, **kwargs):
page = self.get_argument('page', 1)
page = int(page) if int(page) else 1
commoditys = self.orm.query(Commodity) \
.filter(Commodity.amount > 0) \
.order_by(Commodity.price.desc()) \
.limit(limit).offset((page - 1) * limit).all()
return self.render('charge.html', commoditys=commoditys, preview=page - 1, next=page + 1, limit=limit)
def post(self, *args, **kwargs):
page = self.get_argument('page', 1)
page = int(page) if int(page) else 1
commoditys = self.orm.query(Commodity) \
.filter(Shopcar.amount > 0) \
.order_by(Shopcar.id.desc()) \
.limit(limit).offset((page - 1) * limit).all()
id = self.get_argument('id')
price = str(self.get_argument('price'))
try:
price = urllib.unquote(price).decode('utf-8')
except UnicodeDecodeError:
return self.render('charge.html', danger=1, commoditys=commoditys, preview=page - 1, next=page + 1,
limit=limit,
dangermessage="汝听,人言乎?")
if len(price) > 1:
return self.render('charge.html', danger=1, commoditys=commoditys, preview=page - 1, next=page + 1,
limit=limit, dangermessage="ATM机坏了,只能收一位数的钱。")
try:
unicodedata.numeric(price)
except ValueError:
return self.render('charge.html', danger=1, commoditys=commoditys, preview=page - 1, next=page + 1,
limit=limit,
dangermessage="汝听,人言乎?")
# return self.render('charge.html', danger=1, commoditys=commoditys, preview=page - 1, next=page + 1,
# limit=limit,
# dangermessage="测试专用。当前输入字符为:{0},其Unicode名称为:{1},其Unicode numeric为:{2}".format(price,
# unicodedata.name(
# price),unicodedata.numeric(price)))
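        # Note (illustrative comment, not in the original handler): the
        # single-character limit above matters because unicodedata.numeric()
        # accepts any character carrying a Unicode numeric value, and that
        # value is not limited to the digits 0-9. With the standard library's
        # unicodedata module, for example:
        #     unicodedata.numeric(u'5')  -> 5.0
        #     unicodedata.numeric(u'万') -> 10000.0
        # so a one-character price can still represent a very large number.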
try:
commoditys = self.orm.query(Commodity).filter(Commodity.id == id).one()
except NoResultFound:
return self.render('charge.html', danger=1, commoditys=commoditys, preview=page - 1, next=page + 1,
limit=limit,
dangermessage="亲,这边建议您不要搞事情哦。ヽ(✿゚▽゚)ノ")
if commoditys.name=='flag':
if unicodedata.numeric(price)>=commoditys.price:
return self.render('pay.html', success=1, flag=True)
else:
page = self.get_argument('page', 1)
page = int(page) if int(page) else 1
commoditys = self.orm.query(Commodity) \
.filter(Shopcar.amount > 0) \
.order_by(Shopcar.id.desc()) \
.limit(limit).offset((page - 1) * limit).all()
return self.render('charge.html', danger=1, commoditys=commoditys, preview=page - 1, next=page + 1,
limit=limit,
dangermessage="充钱才能变得更强。")
else:
page = self.get_argument('page', 1)
page = int(page) if int(page) else 1
commoditys = self.orm.query(Commodity) \
.filter(Shopcar.amount > 0) \
.order_by(Shopcar.id.desc()) \
.limit(limit).offset((page - 1) * limit).all()
return self.render('charge.html', danger=1, commoditys=commoditys, preview=page - 1, next=page + 1,
limit=limit,
dangermessage="买错商品了,亲。")
| 42.639344 | 146 | 0.550173 |
916260c4cee7e8796f3cb995b381a8493368e461 | 20,424 | py | Python | PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/indexes/period/test_construction.py | AbdulHoffmann/carla_carissma | 8d382769ffa02a6c61a22c57160285505f5ff0a4 | ["MIT"] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/indexes/period/test_construction.py | AbdulHoffmann/carla_carissma | 8d382769ffa02a6c61a22c57160285505f5ff0a4 | ["MIT"] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/indexes/period/test_construction.py | AbdulHoffmann/carla_carissma | 8d382769ffa02a6c61a22c57160285505f5ff0a4 | ["MIT"] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z |
import numpy as np
import pytest
from pandas.compat import PY3, lmap, lrange, text_type
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import (
Index, Period, PeriodIndex, Series, date_range, offsets, period_range)
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex(object):
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='D')]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.Index(np.array(arr), dtype=object))
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start='4/2/2012', periods=10, freq='B')
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
pytest.raises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_nano(self):
idx = period_range(start=Period(ordinal=1, freq='N'),
end=Period(ordinal=4, freq='N'), freq='N')
exp = PeriodIndex([Period(ordinal=1, freq='N'),
Period(ordinal=2, freq='N'),
Period(ordinal=3, freq='N'),
Period(ordinal=4, freq='N')], freq='N')
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
pytest.raises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
pytest.raises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
pytest.raises(ValueError, PeriodIndex, start=start, end=end)
pytest.raises(ValueError, PeriodIndex, start=start)
pytest.raises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
pytest.raises(ValueError, PeriodIndex, idx._ndarray_values)
pytest.raises(ValueError, PeriodIndex, list(idx._ndarray_values))
pytest.raises(TypeError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq='M')
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == 'M'
result = PeriodIndex(idx, freq='2M')
tm.assert_index_equal(result, idx.asfreq('2M'))
assert result.freq == '2M'
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq('2M'))
assert result.freq == '2M'
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
pytest.raises(ValueError, PeriodIndex, vals, freq='D')
@pytest.mark.parametrize('box', [None, 'series', 'index'])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range('2017', periods=4, freq="M")
if box is None:
data = data._values
elif box == 'series':
data = pd.Series(data)
result = PeriodIndex(data, freq='D')
expected = PeriodIndex([
'2017-01-31', '2017-02-28', '2017-03-31', '2017-04-30'
], freq="D")
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
        # passing a period dtype should set the corresponding freq
idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-03'], freq='M')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[M]'
idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]')
exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[3D]'
        # if we already have a freq and it's not the same, then asfreq
# (not changed)
idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D')
res = PeriodIndex(idx, dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-01'], freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
res = PeriodIndex(idx, freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
msg = 'specified freq and dtype are different'
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(['2011-01'], freq='M', dtype='period[D]')
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq='M')
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == 'M'
with pytest.raises(ValueError, match='freq not specified'):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([pd.NaT, pd.NaT,
Period('2011-01', freq='M'),
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match='freq not specified'):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match='freq not specified'):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match='freq not specified'):
PeriodIndex(['NaT', 'NaT'])
with pytest.raises(ValueError, match='freq not specified'):
PeriodIndex(np.array(['NaT', 'NaT']))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')])
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')]))
# first element is pd.NaT
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')])
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')]))
def test_constructor_mixed(self):
idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT,
'2012-01-01'])
exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D')
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=2, freq='M')
result = idx._simple_new(idx, name='p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype('i8'), name='p', freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq='M', name='p')
result = idx._simple_new(idx, name='p', freq='M')
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize('floats', [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
with pytest.raises(TypeError):
pd.PeriodIndex._simple_new(floats, freq='M')
with pytest.raises(TypeError):
pd.PeriodIndex(floats, freq='M')
def test_constructor_nat(self):
pytest.raises(ValueError, period_range, start='NaT',
end='2011-01-01', freq='M')
pytest.raises(ValueError, period_range, start='2011-01-01',
end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
@pytest.mark.parametrize('func, warning', [
(PeriodIndex, FutureWarning),
(period_range, None)
])
def test_constructor_freq_mult(self, func, warning):
# GH #7811
with tm.assert_produces_warning(warning):
# must be the same, but for sure...
pidx = func(start='2014-01', freq='2M', periods=4)
expected = PeriodIndex(['2014-01', '2014-03',
'2014-05', '2014-07'], freq='2M')
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
expected = PeriodIndex(['2014-01-02', '2014-01-05',
'2014-01-08', '2014-01-11',
'2014-01-14'], freq='3D')
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
'2014-01-01 17:00'], freq='4H')
tm.assert_index_equal(pidx, expected)
msg = ('Frequency must be positive, because it'
' represents span: -1M')
with pytest.raises(ValueError, match=msg):
PeriodIndex(['2011-01'], freq='-1M')
msg = ('Frequency must be positive, because it' ' represents span: 0M')
with pytest.raises(ValueError, match=msg):
PeriodIndex(['2011-01'], freq='0M')
msg = ('Frequency must be positive, because it' ' represents span: 0M')
with pytest.raises(ValueError, match=msg):
period_range('2011-01', periods=3, freq='0M')
@pytest.mark.parametrize('freq', ['A', 'M', 'D', 'T', 'S'])
@pytest.mark.parametrize('mult', [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = period_range(start='2014-04-01', freq=freqstr, periods=10)
expected = date_range(start='2014-04-01', freq=freqstr,
periods=10).to_period(freqstr)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ['1D1H', '1H1D']:
pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'],
freq='25H')
for freq in ['1D1H', '1H1D']:
pidx = period_range(start='2016-01-01', periods=2, freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'],
freq='25H')
tm.assert_index_equal(pidx, expected)
def test_constructor_range_based_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
def test_constructor_range_based_deprecated_different_freq(self):
with tm.assert_produces_warning(FutureWarning) as m:
PeriodIndex(start='2000', periods=2)
warning, = m
assert 'freq="A-DEC"' in str(warning.message)
def test_constructor(self):
pi = period_range(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
pi = period_range(freq='Q', start='1/1/2001', end='12/1/2009')
assert len(pi) == 4 * 9
pi = period_range(freq='M', start='1/1/2001', end='12/1/2009')
assert len(pi) == 12 * 9
pi = period_range(freq='D', start='1/1/2001', end='12/31/2009')
assert len(pi) == 365 * 9 + 2
pi = period_range(freq='B', start='1/1/2001', end='12/31/2009')
assert len(pi) == 261 * 9
pi = period_range(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert len(pi) == 365 * 24
pi = period_range(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert len(pi) == 24 * 60
pi = period_range(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert len(pi) == 24 * 60 * 60
start = Period('02-Apr-2005', 'B')
i1 = period_range(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period('2006-12-31', 'W')
i1 = period_range(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period('2006-12-31', '1w')
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2006-12-31', ('w', 1))
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2005-05-01', 'B')
i1 = period_range(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
pytest.raises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
pytest.raises(ValueError, PeriodIndex, vals)
def test_constructor_error(self):
start = Period('02-Apr-2005', 'B')
end_intv = Period('2006-12-31', ('w', 1))
msg = 'start and end must have same freq'
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end_intv)
msg = ('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
@pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B',
'T', 'S', 'L', 'U', 'N', 'H'])
def test_recreate_from_data(self, freq):
org = period_range(start='2001/04/01', freq=freq, periods=1)
idx = PeriodIndex(org.values, freq=freq)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if PY3:
# unicode
types += text_type,
for t in types:
expected = Index(lmap(t, raw))
res = index.map(t)
# should return an Index
assert isinstance(res, Index)
# preserve element types
assert all(isinstance(resi, t) for resi in res)
# lastly, values should compare equal
tm.assert_index_equal(res, expected)
class TestSeriesPeriod(object):
def setup_method(self, method):
self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
def test_constructor_cant_cast_period(self):
with pytest.raises(TypeError):
Series(period_range('2000-01-01', periods=10, freq='D'),
dtype=float)
def test_constructor_cast_object(self):
s = Series(period_range('1/1/2000', periods=10),
dtype=PeriodDtype("D"))
exp = Series(period_range('1/1/2000', periods=10))
tm.assert_series_equal(s, exp)
| 39.276923 | 79 | 0.578976 |
f9adecd8fabbd495baeaad029830fe4ee2d12dbe | 31,917 | py | Python | deucevalere/client/valere.py | BenjamenMeyer/deuce-valere | e9ba7d96825e81e51ddd5f99f1464990969a8f07 | ["Apache-2.0"] | null | null | null | deucevalere/client/valere.py | BenjamenMeyer/deuce-valere | e9ba7d96825e81e51ddd5f99f1464990969a8f07 | ["Apache-2.0"] | null | null | null | deucevalere/client/valere.py | BenjamenMeyer/deuce-valere | e9ba7d96825e81e51ddd5f99f1464990969a8f07 | ["Apache-2.0"] | null | null | null |
"""
Deuce Valere - Client - Valere
"""
import datetime
import logging
from deuceclient.api import Block
from deuceclient.common import errors as deuce_errors
from stoplight import validate
from deucevalere.common.validation import *
from deucevalere.common.validation_instance import *
class ValereClient(object):
@validate(deuce_client=ClientRule,
vault=VaultInstanceRule,
manager=ValereManagerRule)
def __init__(self, deuce_client, vault, manager):
self.deuceclient = deuce_client
self.vault = vault
self.manager = manager
self.log = logging.getLogger(__name__)
def get_block_list(self):
"""Fill the Manager's list of current blocks
"""
self.manager.metadata.current = []
marker = self.manager.start_block
self.log.info('Project ID: {0}, Vault {1} - '
'Searching for Blocks [{2}, {3})'
.format(self.vault.project_id,
self.vault.vault_id,
marker,
self.manager.end_block))
while True:
for block_id in self.deuceclient.GetBlockList(self.vault,
marker=marker):
if self.manager.end_block is not None:
if block_id < self.manager.end_block:
self.manager.metadata.current.append(block_id)
else:
break
else:
self.manager.metadata.current.append(block_id)
marker = self.vault.blocks.marker
self.log.debug('Next Marker: {0}'.format(marker))
if marker is None:
break
@staticmethod
@validate(metadata_id=MetadataBlockIdRuleNoneOkay)
def _convert_metadata_id_to_storage_id(metadata_id):
"""Return a 'valid' storage id that is the lowest possible value
Note: This will need to be updated when the format of the
storage id changes.
        Note: It is impossible to create a UUID that is guaranteed to be the
              lowest possible value using a UUID generator. However, UUIDs
              are alphanumeric values; thus '0' is always the lowest value
              and filling out a format string that looks like a UUID string
              but uses all zeros (0) will give us a string that will compare
as the lowest possible UUID value.
"""
if metadata_id is not None:
return '{0:}_{1:}-{2:}-{2:}-{2:}-{3:}'.format(metadata_id,
'{:08X}'.format(0),
'{:04X}'.format(0),
'{:012X}'.format(0))
else:
return None
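    # Illustrative example (not part of the original module): the helper
    # above fills every UUID field with zeros, so for any metadata block id
    # it returns
    #
    #     '<metadata_id>_00000000-0000-0000-0000-000000000000'
    #
    # which compares lower than any real storage id derived from the same
    # metadata id and can therefore serve as a range marker.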
def get_storage_list(self):
"""Fill the manager's list of current storage blocks
"""
self.manager.storage.current = []
start_marker = ValereClient._convert_metadata_id_to_storage_id(
self.manager.start_block)
end_marker = ValereClient._convert_metadata_id_to_storage_id(
self.manager.end_block) if self.manager.end_block else None
self.log.info('Project ID: {0}, Vault {1} - '
'Searching for Storage Blocks [{2}, {3})'
.format(self.vault.project_id,
self.vault.vault_id,
start_marker,
end_marker))
while True:
for block_id in self.deuceclient.GetBlockStorageList(
self.vault, marker=start_marker):
if end_marker is not None:
if block_id < end_marker:
self.manager.storage.current.append(block_id)
else:
break
else:
self.manager.storage.current.append(block_id)
start_marker = self.vault.storageblocks.marker
self.log.debug('Next Marker: {0}'.format(start_marker))
if start_marker is None:
break
def validate_metadata(self):
"""Validate a block
Access each block through a HEAD operation
Checks if the reference count is zero
For blocks with zero reference counts mark them as expired if they
pass the age specified by the manager
"""
        # Note: This function is a little more verbose since it is detecting
# which blocks to delete.
if self.manager.metadata.current is None:
self.get_block_list()
if len(self.manager.metadata.current) == 0:
self.get_block_list()
if self.manager.metadata.expired is None:
self.manager.metadata.expired = []
# Loop over any known blocks and validate them
for block_id in self.manager.metadata.current:
self.log.debug('Project ID {0}, Vault {1} - '
'Validating Block: {2}'
.format(self.vault.project_id,
self.vault.vault_id,
block_id))
try:
# Access the block so that Deuce validates it all internally
block = self.deuceclient.HeadBlock(self.vault,
self.vault.blocks[block_id])
except deuce_errors.MissingBlockError as missing_ex:
self.log.warn('Project ID {0}, Vault {1} - '
'Block {2} error missing storage block '
.format(self.vault.project_id,
self.vault.vault_id,
block_id))
self.manager.missing_counter.add(1, 0)
block = None
except Exception as ex:
# if there was a problem just mark the block as None so it
# get ignored for this iteration of the loop
self.log.warn('Project ID {0}, Vault {1} - '
'Block {2} error heading block ({3}): {4}'
.format(self.vault.project_id,
self.vault.vault_id,
block_id,
type(ex),
str(ex)))
block = None
# if there was a problem then go to the next block_id
if block is None:
self.log.warn('Project ID {0}, Vault {1} - '
'Block {2} no block data to analyze'
.format(self.vault.project_id,
self.vault.vault_id,
block_id))
continue
# Now check if the block has any references
if int(block.ref_count) == 0:
self.log.warn('Project ID {0}, Vault {1} - '
'Block {2} has no references'
.format(self.vault.project_id,
self.vault.vault_id,
block_id))
# Try to calculate the age of the block since it was
# last modified
block_age = datetime.datetime.utcnow() - \
datetime.datetime.utcfromtimestamp(block.ref_modified)
self.log.warn('Project ID {0}, Vault {1} - '
'Block {2} has age {3}'
.format(self.vault.project_id,
self.vault.vault_id,
block_id,
block_age))
# If the block age is beyond the threshold then mark it
# for deletion
if block_age > self.manager.expire_age:
self.log.info('Project ID {0}, Vault {1} - '
'Found Expired Block: {2}'
.format(self.vault.project_id,
self.vault.vault_id,
block_id))
# If we have already marked it for deletion then
# do not add it a second time; try to keep the list
# to a minimum
if block_id not in self.manager.metadata.expired:
self.manager.expired_counter.add(1, len(block))
self.manager.metadata.expired.append(block_id)
else:
self.log.warn('Project ID {0}, Vault {1} - '
'Block {2} has {3} references'
.format(self.vault.project_id,
self.vault.vault_id,
block_id,
block.ref_count))
for block_id in self.manager.metadata.expired:
# there _should_ only be one instance of a block_id in the
# current list; but loop over it just in case since remove()
# only removes the first instance it finds
while block_id in self.manager.metadata.current:
self.manager.metadata.current.remove(block_id)
def cleanup_expired_blocks(self):
"""Delete expired blocks
Attempt to delete each expired block
On success, adds the block to a list of deleted blocks and removes
the block id from the list of expired blocks
Note: A block deletion may fail for numerous reasons including that
the block received a change in its reference count to being
non-zero between when it was detected as being expired and
when the deletion operation occurred. This is a designed
feature of Deuce so that blocks are not accidentally or
improperly removed.
"""
# Note: This function must be very verbose in its logging since it
        #       is removing user data that will not be recoverable from
# within Deuce
# Check if there is a list to operate on; if not throw an error
if self.manager.metadata.expired is not None:
# Keep track of any block we successfully deleted
if self.manager.metadata.deleted is None:
self.manager.metadata.deleted = []
# Attempt to delete all expired blocks
for expired_block_id in self.manager.metadata.expired:
# Check to see if we have already deleted the block
if expired_block_id not in self.manager.metadata.deleted:
# Log that we are going to delete the block
self.log.info('Project ID {0}, Vault {1} - '
'Deleting Expired Block: {2}'
.format(self.vault.project_id,
self.vault.vault_id,
expired_block_id))
# Attempt to delete the block
# Note: This may fail for numerous reasons, including
# that the block received a reference count between
                    #       when it was determined not to have any and when
# the cleanup tried to remove it.
try:
self.deuceclient.DeleteBlock(self.vault,
self.vault.blocks[
expired_block_id])
# The block was deleted, save it as being so
# This serves two purposes:
# 1. Do not attempt to delete the same block twice
# 2. Do not mutate the expired block list while it
# is being traversed; it'll get cleaned up later
self.manager.metadata.deleted.append(expired_block_id)
block_size = len(self.vault.blocks[
expired_block_id])
self.manager.delete_expired_counter.add(1,
block_size)
self.log.info('Project ID {0}, Vault {1} - '
'Successfully Deleted Expired Block: {2}'
.format(self.vault.project_id,
self.vault.vault_id,
expired_block_id))
except Exception as ex:
self.log.info('Project ID {0}, Vault {1} - '
'FAILED to Deleted Expired Block '
'({2}): {3}'
.format(self.vault.project_id,
self.vault.vault_id,
expired_block_id,
str(ex)))
else:
self.log.info('Project ID {0}, Vault {1} - '
'Already Deleted Expired Block: {2}'
.format(self.vault.project_id,
self.vault.vault_id,
expired_block_id))
# Now cleanup the expired list to remove the blocks that were
# Actually deleted
for deleted_block_id in self.manager.metadata.deleted:
while deleted_block_id in self.manager.metadata.expired:
self.manager.metadata.expired.remove(deleted_block_id)
else:
raise RuntimeError('No expired blocks to remove.'
'Please run validate_metadata() first.')
def build_cross_references(self, skip_expired=False):
"""Build a cross reference look-up for the metadata and storage ids
Primary purpose is to have a quick way to do a reverse lookup of
storage ids to determine validity based on the information we
already have.
:param skip_expired: boolean value for whether or not to include the
expired metadata blocks in the cross reference
data. Default is False which removes them from
the cross reference data.
"""
check_expired = self.manager.metadata.expired is not None
check_deleted = self.manager.metadata.deleted is not None
if skip_expired:
check_expired = False
for block_id, block in self.vault.blocks.items():
storage_id = block.storage_id
self.log.debug('Project ID {0}, Vault {1} - '
'Checking status of Storage ID {2} '
'with Block ID {3}'
.format(self.vault.project_id,
self.vault.vault_id,
storage_id,
block_id))
# Skip the block if it was expired
if check_expired:
if block_id in self.manager.metadata.expired:
self.log.debug('Project ID {0}, Vault {1} - '
'block {2} expired, not '
'cross-referencing'
.format(self.vault.project_id,
self.vault.vault_id,
block_id))
continue
# Skip the block if it was deleted
if check_deleted:
if block_id in self.manager.metadata.deleted:
self.log.debug('Project ID {0}, Vault {1} - '
'block {2} deleted, not '
'cross-referencing'
.format(self.vault.project_id,
self.vault.vault_id,
block_id))
continue
# lookup the storage id
self.log.debug('Project ID {0}, Vault {1} - '
'Mapping Storage ID {2} to Block ID {3}'
.format(self.vault.project_id,
self.vault.vault_id,
storage_id,
block_id))
# Add it to the cross-reference dict
self.manager.cross_reference[storage_id] = block_id
def validate_storage(self, skip_expired=False):
"""Check storage for orphaned blocks
This implements the short version where we only operate on the
listing of the storage blocks.
        :param skip_expired: Parameter passed through to build_cross_references().
See build_cross_references() for details.
Note: In some cases it may be advantageous to include the expired
metadata blocks in the cross-reference data in order to keep
their associated storage blocks from incorrectly being
detected as orphaned blocks. This is primarily due to an
order of operations issue when validating both metadata and
storage prior to performing the actual block deletions. In
these instances, set skip_expired to True to avoid improper
detection of orphaned blocks that are not really orphaned.
"""
if self.manager.storage.current is None:
self.get_storage_list()
if len(self.manager.storage.current) == 0:
self.get_storage_list()
if self.manager.storage.orphaned is None:
self.manager.storage.orphaned = []
# This builds a quick lookup which provides the following benefits:
# 1. We do not have to look at self.vault.blocks every time
# 2. We do not need to rely on the format of the storage block id to
# determine the metadata block id
self.build_cross_references(skip_expired)
if len(self.manager.cross_reference) == 0:
self.log.warn('Project ID {0}, Vault {1} - no cross-references '
                      'between metadata and storage. All blocks will be '
'marked as orphaned for metadata block range '
'[{2}, {3})'
.format(
self.vault.project_id,
self.vault.vault_id,
self.manager.start_block,
self.manager.end_block))
# Note: Marking a block orphaned here does not necessarily mean it is
# actually orphaned as there could have been activity on the
# Vault since we got the listings that could change the state of
        #       any block. This cuts both ways in that some blocks we determine
        #       are orphaned may not be orphaned, while other blocks we think
        #       are not orphaned are actually orphaned. This is okay as Deuce
        #       is designed to prevent deletion of non-orphaned blocks, and
# blocks that are not identified as orphaned but really are will
# be picked up on the next run.
for storage_id in self.manager.storage.current:
if storage_id not in self.manager.cross_reference:
self.log.info('Project ID {0}, Vault {1} - '
'Found Orphaned Storage Block {2}'
.format(
self.vault.project_id,
self.vault.vault_id,
storage_id))
self.manager.storage.orphaned.append(storage_id)
block_size = len(self.vault.storageblocks[storage_id])
if block_size == 0:
mid, sid = storage_id.split('_')
if mid in self.vault.blocks:
self.log.info('Project ID {0}, Vault {1} - '
'Located block {2} matching '
'orphaned block {3}. Using for '
'block size'.format(
self.vault.project_id,
self.vault.vault_id,
mid,
storage_id))
block_size = len(self.vault.blocks[mid])
self.manager.orphaned_counter.add(1, block_size)
def validate_storage_with_head(self):
"""Check storage for orphaned blocks
This implements the long version where we operate on the listing
of the storage blocks and also HEAD each block in storage
"""
if self.manager.storage.current is None:
self.get_storage_list()
if len(self.manager.storage.current) == 0:
self.get_storage_list()
if self.manager.storage.orphaned is None:
self.manager.storage.orphaned = []
# Note: This version relies on Deuce to tell us that a block is
# orphaned so it is the most accurate at the time this
# function is called. However, there is still the chance that
# a block has a change of state between when we access it here
# and when it actually gets cleaned up
for storage_id in self.manager.storage.current:
self.log.debug('Project ID {0}, Vault {1} - '
'Validating Storage Block: {2}'
.format(self.vault.project_id,
self.vault.vault_id,
storage_id))
try:
block = self.deuceclient.HeadBlockStorage(self.vault,
self.vault.
storageblocks[
storage_id])
except Exception as ex:
# if there was a problem just mark the block as None so it
# get ignored for this iteration of the loop
self.log.warn('Project ID {0}, Vault {1} - '
'Storage Block {2} error heading block ({3}): {4}'
.format(self.vault.project_id,
self.vault.vault_id,
storage_id,
type(ex),
str(ex)))
block = None
# if there was a problem then go to the next block_id
if block is None:
self.log.warn('Project ID {0}, Vault {1} - '
'Storage Block {2} no block data to analyze'
.format(self.vault.project_id,
self.vault.vault_id,
storage_id))
continue
if block.block_orphaned:
self.log.info('Project ID {0}, Vault {1} - '
'Found Orphaned Storage Block {2}'
.format(
self.vault.project_id,
self.vault.vault_id,
storage_id))
self.manager.storage.orphaned.append(storage_id)
block_size = len(block)
self.log.info('Storage Block ID {0} - block size {1}'
.format(storage_id, block_size))
if block_size == 0:
mid, sid = storage_id.split('_')
self.log.info('\tBlock ID: {0}'.format(mid))
self.log.info('\tStorage UUID: {0}'.format(sid))
if mid in self.vault.blocks:
self.log.info('\tBlock ID {0} in Vault'.format(mid))
self.log.info('Project ID {0}, Vault {1} - '
'Located block {2} matching '
'orphaned block {3}. Using for '
'block size'.format(
self.vault.project_id,
self.vault.vault_id,
mid,
storage_id))
self.log.info('\tUpdating Block Size from {0} to {1}'
.format(block_size,
len(self.vault.blocks[mid])))
block_size = len(self.vault.blocks[mid])
self.manager.orphaned_counter.add(1, block_size)
def calculate_current(self):
"""Calculate the amount of data that is still current
"""
if self.manager.metadata.current is not None:
for block_id in self.manager.metadata.current:
block_size = len(self.vault.blocks[block_id])
self.manager.current_counter.add(1, block_size)
def cleanup_storage(self):
"""Delete orphaned blocks from storage
Attempt to delete each orphaned block
        On success, adds the block to the list of deleted blocks and removes
the storage id from the orphaned blocks
Note: A block deletion may fail for numerous reasons including that
the block received a change in its state so it is no longer
orphaned between when it was detected as orphaned and when
the deletion operation occurred. This is a designed feature
              of Deuce so that blocks are not accidentally or
improperly removed.
"""
# Note: This function must be very verbose in its logging since it
        #       is removing user data that will not be recoverable from
# within Deuce
# Check if there is a list to operate on; if not throw an error
if self.manager.storage.orphaned is not None:
# Keep track of any block we successfully deleted
if self.manager.storage.deleted is None:
self.manager.storage.deleted = []
# Attempt to delete all expired blocks
for orphaned_storage_block_id in self.manager.storage.orphaned:
# Check to see if we have already deleted the block
if orphaned_storage_block_id not in \
self.manager.storage.deleted:
# Log that we are going to delete the block
self.log.info('Project ID {0}, Vault {1} - '
'Deleting Orphaned Block: {2}'
.format(self.vault.project_id,
self.vault.vault_id,
orphaned_storage_block_id))
# Attempt to delete the block
# Note: This may fail for numerous reasons, including
# that the block received a reference count between
                    #       when it was determined not to have any and when
# the cleanup tried to remove it.
try:
osbid = orphaned_storage_block_id
orphaned_block = Block(self.vault.project_id,
self.vault.vault_id,
block_id=None,
storage_id=osbid,
block_type='storage')
self.deuceclient.DeleteBlockStorage(self.vault,
orphaned_block)
# The block was deleted, save it as being so
# This serves two purposes:
# 1. Do not attempt to delete the same block twice
# 2. Do not mutate the orphaned block list while it
# is being traversed; it'll get cleaned up later
self.manager.storage.deleted.append(
orphaned_storage_block_id)
# By default this will be zero and this should be
                        # valid since the orphaned_storage_block_id comes
# from listing the storage blocks to start with
block_size = len(self.vault.storageblocks[
orphaned_storage_block_id])
if block_size == 0:
mid, sid = orphaned_storage_block_id.split('_')
if mid in self.vault.blocks:
self.log.info('Project ID {0}, Vault {1} - '
'Located block {2} matching '
'orphaned block {3}. Using for '
'block size'.format(
self.vault.project_id,
self.vault.vault_id,
mid,
orphaned_storage_block_id))
block_size = len(self.vault.blocks[mid])
self.manager.delete_orphaned_counter.add(1, block_size)
self.log.info('Project ID {0}, Vault {1} - '
'Successfully Deleted Orphaned Block: '
'{2}'
.format(self.vault.project_id,
self.vault.vault_id,
orphaned_storage_block_id))
except Exception as ex:
self.log.info('Project ID {0}, Vault {1} - '
                                      'FAILED to Delete Orphaned Block '
'({2}): {3}'
.format(self.vault.project_id,
self.vault.vault_id,
orphaned_storage_block_id,
str(ex)))
else:
self.log.info('Project ID {0}, Vault {1} - '
'Already Deleted Orphaned Block: {2}'
.format(self.vault.project_id,
self.vault.vault_id,
orphaned_storage_block_id))
# Now cleanup the orphaned list to remove the blocks that were
            # actually deleted
for deleted_block_id in self.manager.storage.deleted:
while deleted_block_id in self.manager.storage.orphaned:
self.manager.storage.orphaned.remove(deleted_block_id)
else:
            raise RuntimeError('No orphaned blocks to remove. '
'Please run validate_storage() '
'or validate_storage_with_head() first.')
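# A minimal usage sketch (assumptions: the surrounding auditor class exposes
# validate_storage() / validate_storage_with_head() as referenced in the
# RuntimeError above, and `auditor` names an instance of it; both names are
# hypothetical and not part of the original module):
#
#     auditor.validate_storage_with_head()  # populate manager.storage.orphaned
#     auditor.calculate_current()           # tally data that is still current
#     auditor.cleanup_storage()             # delete the orphaned blocks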
| 47.566319
| 79
| 0.480935
|
f6015e5c8472153234087c50b234d5567924b3f4
| 1,322
|
py
|
Python
|
migrations/versions/f60b3448eeb8_granular_olink_facet_groups.py
|
CIMAC-CIDC/cidc-api-gae
|
23f31c6d5b7f16c121001192822f92fb88395652
|
[
"MIT"
] | null | null | null |
migrations/versions/f60b3448eeb8_granular_olink_facet_groups.py
|
CIMAC-CIDC/cidc-api-gae
|
23f31c6d5b7f16c121001192822f92fb88395652
|
[
"MIT"
] | 54
|
2019-08-09T19:49:11.000Z
|
2022-01-26T21:36:08.000Z
|
migrations/versions/f60b3448eeb8_granular_olink_facet_groups.py
|
CIMAC-CIDC/cidc-api-gae
|
23f31c6d5b7f16c121001192822f92fb88395652
|
[
"MIT"
] | 4
|
2019-07-24T12:26:49.000Z
|
2021-09-29T19:02:01.000Z
|
"""Granular Olink facet groups
Revision ID: f60b3448eeb8
Revises: 26ba8b4e9b51
Create Date: 2020-09-28 08:35:51.265610
"""
from alembic import op
import sqlalchemy as sa
from cidc_api.models import Session, DownloadableFiles
# revision identifiers, used by Alembic.
revision = "f60b3448eeb8"
down_revision = "26ba8b4e9b51"
branch_labels = None
depends_on = None
olink_facet_group_cases = sa.case(
[
(
DownloadableFiles.object_url.like("%olink%assay_npx.xlsx"),
"/olink/chip_/assay_npx.xlsx",
),
(
DownloadableFiles.object_url.like("%olink%assay_raw_ct.csv"),
"/olink/chip_/assay_raw_ct.csv",
),
(
DownloadableFiles.object_url.like("%olink%study_npx.xlsx"),
"/olink/study_npx.xlsx",
),
],
else_=DownloadableFiles.facet_group,
)
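# Illustrative effect of the case expression above (the object URLs are made-up
# examples, not taken from the migration itself):
#   object_url ending in "olink/chip_1/assay_npx.xlsx"   -> facet_group "/olink/chip_/assay_npx.xlsx"
#   object_url ending in "olink/chip_2/assay_raw_ct.csv" -> facet_group "/olink/chip_/assay_raw_ct.csv"
#   object_url ending in "olink/study_npx.xlsx"          -> facet_group "/olink/study_npx.xlsx"
#   any other file keeps its existing facet_group (the else_ branch).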
def upgrade():
session = Session(bind=op.get_bind())
session.query(DownloadableFiles).update(
{"facet_group": olink_facet_group_cases}, synchronize_session="fetch"
)
session.commit()
def downgrade():
session = Session(bind=op.get_bind())
session.query(DownloadableFiles).filter(
DownloadableFiles.upload_type == "olink"
).update({"facet_group": "Assay Type|Olink|All Olink Files|/olink"})
session.commit()
| 25.423077
| 77
| 0.666415
|
206db63aa1509ea357228e27440965cf48f5453c
| 862
|
py
|
Python
|
coding_interview/66.py
|
smartx-jshan/Coding_Practice
|
bc7d485e7992031e55df62483818b721ad7d1d4f
|
[
"Apache-2.0"
] | null | null | null |
coding_interview/66.py
|
smartx-jshan/Coding_Practice
|
bc7d485e7992031e55df62483818b721ad7d1d4f
|
[
"Apache-2.0"
] | null | null | null |
coding_interview/66.py
|
smartx-jshan/Coding_Practice
|
bc7d485e7992031e55df62483818b721ad7d1d4f
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        if not nums:
            return -1
        # First pass: binary search for the index of the minimum value (the pivot)
        left = 0
        right = len(nums) - 1
        while left < right:
            mid = left + (right - left) // 2
            if nums[mid] > nums[right]:
                left = mid + 1
            else:
                right = mid
        pivot = left
        # Second pass: binary search over the "unrotated" view, mapping each
        # midpoint back to the real index via the pivot offset
        left = 0
        right = len(nums) - 1
        while left <= right:
            mid = left + (right - left) // 2
            mid_pivot = (mid + pivot) % len(nums)
            if nums[mid_pivot] < target:
                left = mid + 1
            elif nums[mid_pivot] > target:
                right = mid - 1
            else:
                return mid_pivot
        return -1
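# Quick sanity check (hypothetical inputs added for illustration; not part of
# the original submission):
if __name__ == "__main__":
    assert Solution().search([4, 5, 6, 7, 0, 1, 2], 0) == 4   # target present
    assert Solution().search([4, 5, 6, 7, 0, 1, 2], 3) == -1  # target absent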
| 24.628571
| 58
| 0.390951
|
f7290948c42d8e7aa37006897fa91645c005e290
| 339
|
py
|
Python
|
ironsms/exceptions/limits.py
|
viuipan/ironsmslib
|
0d494ea08a6bcdd0f11f32e88baccd3555b5a0b3
|
[
"MIT"
] | null | null | null |
ironsms/exceptions/limits.py
|
viuipan/ironsmslib
|
0d494ea08a6bcdd0f11f32e88baccd3555b5a0b3
|
[
"MIT"
] | null | null | null |
ironsms/exceptions/limits.py
|
viuipan/ironsmslib
|
0d494ea08a6bcdd0f11f32e88baccd3555b5a0b3
|
[
"MIT"
] | null | null | null |
from .base import APIException
class LimitActiveException(APIException):
message = "LIMIT_ACTIVE"
description = "number activation has already been completed"
limit: int
def __init__(self, response):
self.limit = response['limit']
super(LimitActiveException, self).__init__(description=self.description)
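# A minimal handling sketch (assumption: `request_activation()` stands in for
# whatever client call raises this exception; the response is expected to carry
# a 'limit' field, as read in the constructor above):
#
#     try:
#         request_activation()
#     except LimitActiveException as ex:
#         print(ex.limit)  # limit value reported by the API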
| 28.25
| 80
| 0.731563
|