repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
gmarek/test-infra | refs/heads/master | gubernator/github/models.py | 4 | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import google.appengine.ext.ndb as ndb
class GithubResource(ndb.Model):
    """Empty key-holder model.

    Instances are never stored with data; the key made here serves as the
    common ancestor (entity group) for all entities belonging to a single
    Issue/PR, enabling easy ancestor queries.
    """

    @staticmethod
    def make_key(repo, number):
        # Key id is the repo name and issue/PR number joined by a space,
        # e.g. 'kubernetes/test-infra 1234'.
        return ndb.Key(GithubResource, '%s %s' % (repo, number))
def shrink(body):
    '''
    Recursively remove Github API urls from an object, to make it
    more human-readable.

    Mutates `body` in place and also returns it for convenience.  Any key
    ending in 'url' whose string value points at api.github.com or
    avatars.githubusercontent.com is dropped, at every nesting level
    (dicts and dicts inside lists).
    '''
    # Collect keys first, then delete, so the dict is not mutated while
    # it is being iterated.  (Python 2: iteritems/basestring.)
    toremove = []
    for key, value in body.iteritems():
        if isinstance(value, basestring):
            if key.endswith('url'):
                if (value.startswith('https://api.github.com/') or
                    value.startswith('https://avatars.githubusercontent.com')):
                    toremove.append(key)
        elif isinstance(value, dict):
            # Nested objects are shrunk in place by the recursive call.
            shrink(value)
        elif isinstance(value, list):
            for el in value:
                if isinstance(el, dict):
                    shrink(el)
    for key in toremove:
        body.pop(key)
    return body
class GithubWebhookRaw(ndb.Model):
    """One GitHub webhook delivery, stored verbatim for later (re)processing."""
    repo = ndb.StringProperty()
    # Issue/PR number; not every event type carries one.
    number = ndb.IntegerProperty(indexed=False)
    # GitHub event name, e.g. 'issues', 'pull_request', 'status'.
    event = ndb.StringProperty()
    timestamp = ndb.DateTimeProperty(auto_now_add=True)
    # Full JSON payload exactly as delivered by GitHub.
    body = ndb.TextProperty(compressed=True)

    def to_tuple(self):
        """Return (event, shrunk payload dict, unix timestamp)."""
        # NOTE(review): strftime('%s') is a platform-specific extension
        # (not standard C strftime) -- confirm the deployment platform
        # supports it.
        return (self.event, shrink(json.loads(self.body)), int(self.timestamp.strftime('%s')))
def from_iso8601(t):
    """Parse a GitHub-style ISO-8601 UTC timestamp ('...Z') into a datetime.

    Falsy inputs (None, empty string) are passed through unchanged, so the
    caller can feed optional fields straight in.
    """
    if not t:
        return t
    return datetime.datetime.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
def make_kwargs(body, fields):
    """Pick `fields` out of a JSON dict, parsing '*_at' values as datetimes.

    Returns a kwargs dict suitable for constructing an ndb model.
    """
    return {
        field: from_iso8601(body[field]) if field.endswith('_at') else body[field]
        for field in fields
    }
class GHStatus(ndb.Model):
    """One GitHub commit status (CI result) for a repo/sha/context triple.

    Key: {repo}\t{sha}\t{context} -- the three components are recovered
    from the key by the properties below, so they are not stored as fields.
    """
    state = ndb.StringProperty(indexed=False)       # e.g. 'pending', 'success'
    target_url = ndb.StringProperty(indexed=False)  # link to the CI result page
    description = ndb.TextProperty()
    created_at = ndb.DateTimeProperty(indexed=False)
    updated_at = ndb.DateTimeProperty(indexed=False)

    @staticmethod
    def make_key(repo, sha, context):
        # Tab-delimited so the pieces can be split back apart; assumes
        # repo and sha never contain tabs themselves.
        return ndb.Key(GHStatus, '%s\t%s\t%s' % (repo, sha, context))

    @staticmethod
    def make(repo, sha, context, **kwargs):
        """Construct a GHStatus with its canonical key."""
        return GHStatus(key=GHStatus.make_key(repo, sha, context), **kwargs)

    @staticmethod
    def query_for_sha(repo, sha):
        """Query every status context recorded for this repo+sha."""
        # Key-range scan: all contexts for the sha sort between the empty
        # context and '\x7f' (highest ASCII character).
        before = GHStatus.make_key(repo, sha, '')
        after = GHStatus.make_key(repo, sha, '\x7f')
        return GHStatus.query(GHStatus.key > before, GHStatus.key < after)

    @staticmethod
    def from_json(body):
        """Build a GHStatus from a GitHub status-event JSON payload."""
        kwargs = make_kwargs(body,
            'sha context state target_url description '
            'created_at updated_at'.split())
        kwargs['repo'] = body['name']
        return GHStatus.make(**kwargs)

    @property
    def repo(self):
        return self.key.id().split('\t', 1)[0]

    @property
    def sha(self):
        return self.key.id().split('\t', 2)[1]

    @property
    def context(self):
        # maxsplit=2 keeps any tabs inside the context itself intact.
        return self.key.id().split('\t', 2)[2]
class GHIssueDigest(ndb.Model):
    """Denormalized summary of one GitHub issue or PR.

    Key: {repo} {number} (space separated).
    """
    is_pr = ndb.BooleanProperty()
    is_open = ndb.BooleanProperty()
    # Usernames associated with the issue (queryable).
    involved = ndb.StringProperty(repeated=True)
    # Cross-reference identifiers taken from payload['xrefs'] (queryable).
    xref = ndb.StringProperty(repeated=True)
    payload = ndb.JsonProperty()
    updated_at = ndb.DateTimeProperty()
    # payload['head'] duplicated into a property so it can be queried.
    head = ndb.StringProperty()

    @staticmethod
    def make_key(repo, number):
        return ndb.Key(GHIssueDigest, '%s %s' % (repo, number))

    @staticmethod
    def make(repo, number, is_pr, is_open, involved, payload, updated_at):
        """Construct a digest; head/xref are denormalized from the payload."""
        return GHIssueDigest(key=GHIssueDigest.make_key(repo, number),
            is_pr=is_pr, is_open=is_open, involved=involved, payload=payload,
            updated_at=updated_at, head=payload.get('head'),
            xref=payload.get('xrefs', []))

    @staticmethod
    def get(repo, number):
        """Fetch the digest for one issue, or None if absent."""
        return GHIssueDigest.make_key(repo, number).get()

    @property
    def repo(self):
        return self.key.id().split()[0]

    @property
    def number(self):
        return int(self.key.id().split()[1])

    @staticmethod
    def find_head(repo, head):
        """Query digests in `repo` whose head property equals `head`."""
        # The two key bounds restrict the index scan to this repo:
        # '' sorts before any number, '~' after (ASCII ordering).
        return GHIssueDigest.query(GHIssueDigest.key > GHIssueDigest.make_key(repo, ''),
            GHIssueDigest.key < GHIssueDigest.make_key(repo, '~'),
            GHIssueDigest.head == head)

    @staticmethod
    def find_xrefs(xref):
        """Query all digests that cross-reference `xref`."""
        return GHIssueDigest.query(GHIssueDigest.xref == xref)
class GHUserState(ndb.Model):
    """Per-user state.

    Key: {github username}.
    """
    # Dict of issue keys => ack time (seconds since epoch).
    acks = ndb.JsonProperty()

    @staticmethod
    def make_key(user):
        return ndb.Key(GHUserState, user)

    @staticmethod
    def make(user, acks=None):
        # `acks or {}` also replaces an explicitly-passed empty/None value
        # with a fresh dict, avoiding a shared mutable default.
        return GHUserState(key=GHUserState.make_key(user), acks=acks or {})
@ndb.transactional
def save_if_newer(obj):
    """Transactionally store `obj` unless the datastore copy is newer.

    Returns True if `obj` was written, False if a strictly newer entity
    (by updated_at) was already present.
    """
    assert obj.updated_at is not None
    existing = obj.key.get()
    stale = (existing is not None and
             existing.updated_at is not None and
             obj.updated_at < existing.updated_at)
    if stale:
        return False
    obj.put()
    return True
|
huiyiqun/check_mk | refs/heads/master | web/htdocs/gui_exceptions.py | 1 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
from cmk.exceptions import MKException, MKGeneralException
class MKAuthException(MKException):
    """Raised when the logged-in user lacks permission for an action."""

    def __init__(self, reason):
        self.reason = reason
        super(MKAuthException, self).__init__(reason)

    def __str__(self):
        return self.reason

    def title(self):
        # _() is the GUI's translation function, provided elsewhere.
        return _("Permission denied")

    def plain_title(self):
        return _("Authentication error")
class MKUnauthenticatedException(MKGeneralException):
    """Raised when a request carries no valid authentication at all."""

    def title(self):
        return _("Not authenticated")

    def plain_title(self):
        return _("Missing authentication credentials")
class MKConfigError(MKException):
    """Raised for errors in the Check_MK configuration."""

    def title(self):
        return _("Configuration error")

    def plain_title(self):
        return self.title()
class MKUserError(MKException):
    """Raised for invalid user input.

    `varname` names the offending form variable so the GUI can point at it.
    """

    def __init__(self, varname, message):
        self.varname = varname
        self.message = message
        super(MKUserError, self).__init__(varname, message)

    def __str__(self):
        return self.message

    def title(self):
        return _("Invalid User Input")

    def plain_title(self):
        return _("User error")
class MKInternalError(MKException):
    """Raised for unexpected internal errors in the GUI."""
    pass
|
goofy57/google_python_class | refs/heads/master | copyspecial/solution/copyspecial.py | 206 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
# LAB(begin solution)
def get_special_paths(dirname):
    """Given a dirname, returns a list of the absolute paths of all its
    'special' files -- files whose name contains a __word__ pattern."""
    special = re.compile(r'__(\w+)__')
    return [os.path.abspath(os.path.join(dirname, name))
            for name in os.listdir(dirname)
            if special.search(name)]
def copy_to(paths, to_dir):
    """Copy all of the given files into to_dir, creating it if necessary.

    Existing files of the same name are overwritten.
    """
    if not os.path.exists(to_dir):
        os.mkdir(to_dir)
    for src in paths:
        dest = os.path.join(to_dir, os.path.basename(src))
        shutil.copy(src, dest)
def zip_to(paths, zipfile):
    """Zip up all of the given files into a new zip file with the given name.

    Shells out to the external `zip` tool; exits the process with status 1
    if the tool fails.
    """
    # -j ('junk paths') stores only the bare filenames, not their dirs.
    # NOTE(review): building the command by string concatenation breaks on
    # filenames containing spaces or shell metacharacters -- consider the
    # zipfile module or a list-based subprocess call.
    cmd = 'zip -j ' + zipfile + ' ' + ' '.join(paths)
    print "Command I'm going to do:" + cmd
    (status, output) = commands.getstatusoutput(cmd)
    # If command had a problem (status is non-zero),
    # print its output to stderr and exit.
    if status:
        sys.stderr.write(output)
        sys.exit(1)
# LAB(end solution)
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print "error: must specify one or more dirs"
sys.exit(1)
# +++your code here+++
# Call your functions
# LAB(begin solution)
# Gather all the special files
paths = []
for dirname in args:
paths.extend(get_special_paths(dirname))
if todir:
copy_to(paths, todir)
elif tozip:
zip_to(paths, tozip)
else:
print '\n'.join(paths)
# LAB(end solution)
if __name__ == "__main__":
main()
|
pnigos/gyp | refs/heads/master | test/configurations/x64/gyptest-x86.py | 340 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in three different configurations.
"""
import TestGyp
import sys
# MSVS is always exercised; ninja consumes the same MSVC toolchain but
# only works for this test on Windows itself.
formats = ['msvs']
if sys.platform == 'win32':
  formats += ['ninja']

test = TestGyp.TestGyp(formats=formats)

test.run_gyp('configurations.gyp')

test.set_configuration('Debug|Win32')
test.build('configurations.gyp', test.ALL)

# Inspect the COFF header of each produced binary with dumpbin to confirm
# it was really built for the advertised architecture.  (The x64 binary
# 'configurations64.exe' is presumably produced by the same build --
# TODO confirm against configurations.gyp.)
for machine, suffix in [('14C machine (x86)', ''),
                        ('8664 machine (x64)', '64')]:
  output = test.run_dumpbin(
      '/headers', test.built_file_path('configurations%s.exe' % suffix))
  if machine not in output:
    test.fail_test()

test.pass_test()
|
TheKnarf/apprtc | refs/heads/master | src/app_engine/apiauth.py | 25 | # Copyright 2015 Google Inc. All Rights Reserved.
"""Google API auth utilities."""
import json
import os
import sys
# Insert our third-party libraries first to avoid conflicts with appengine.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'third_party'))
from apiclient import discovery
import httplib2
import oauth2client.appengine
import oauth2client.client
import constants
def build(scope, service_name, version):
  """Build a service object if authorization is available.

  On the dev server the service-account credentials are read from a local
  'secrets.json' file (client_email + private_key); in production the App
  Engine service account is used.  Returns None when no credentials can
  be obtained.
  """
  if constants.IS_DEV_SERVER:
    credentials = None
    # Local instances require a 'secrets.json' file.
    secrets_path = os.path.join(os.path.dirname(__file__), 'secrets.json')
    if os.path.exists(secrets_path):
      with open(secrets_path) as f:
        auth = json.load(f)
      credentials = oauth2client.client.SignedJwtAssertionCredentials(
          auth['client_email'], auth['private_key'], scope)
  else:
    # Use the GAE service credentials.
    credentials = oauth2client.appengine.AppAssertionCredentials(scope=scope)

  if credentials is None:
    return None
  authorized_http = credentials.authorize(httplib2.Http())
  return discovery.build(service_name, version, http=authorized_http)
|
glatard/nipype | refs/heads/master | nipype/pipeline/plugins/tests/test_pbs.py | 16 | import os
from shutil import rmtree
from tempfile import mkdtemp
from time import sleep
import nipype.interfaces.base as nib
from nipype.testing import assert_equal, skipif
import nipype.pipeline.engine as pe
class InputSpec(nib.TraitedSpec):
    # Trait-based input declaration for TestInterface below.
    input1 = nib.traits.Int(desc='a random int')
    input2 = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
    # Trait-based output declaration for TestInterface below.
    output1 = nib.traits.List(nib.traits.Int, desc='outputs')
class TestInterface(nib.BaseInterface):
    """Minimal nipype interface used as a workflow node in the test below."""
    input_spec = InputSpec
    output_spec = OutputSpec

    def _run_interface(self, runtime):
        # No real work: report success immediately.
        runtime.returncode = 0
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        # Echo input1 back (with a constant prefix) so node-to-node
        # connections can be verified by the test.
        outputs['output1'] = [1, self.inputs.input1]
        return outputs
@skipif(True)
def test_run_pbsgraph():
    """Run a two-node workflow through the PBSGraph plugin (currently skipped)."""
    cur_dir = os.getcwd()
    temp_dir = mkdtemp(prefix='test_engine_')
    os.chdir(temp_dir)
    try:
        # Build a tiny workflow: mod1 feeds a MapNode mod2.
        pipe = pe.Workflow(name='pipe')
        mod1 = pe.Node(interface=TestInterface(), name='mod1')
        mod2 = pe.MapNode(interface=TestInterface(),
                          iterfield=['input1'],
                          name='mod2')
        pipe.connect([(mod1, mod2, [('output1', 'input1')])])
        pipe.base_dir = os.getcwd()
        mod1.inputs.input1 = 1
        execgraph = pipe.run(plugin="PBSGraph")
        names = ['.'.join((node._hierarchy, node.name))
                 for node in execgraph.nodes()]
        node = execgraph.nodes()[names.index('pipe.mod1')]
        result = node.get_output('output1')
        yield assert_equal, result, [1, 1]
    finally:
        # FIX: restore the cwd and remove the scratch dir even when the
        # workflow run raises -- previously both leaked on failure.
        os.chdir(cur_dir)
        rmtree(temp_dir)
Mazecreator/tensorflow | refs/heads/master | tensorflow/contrib/session_bundle/session_bundle_test.py | 133 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for session_bundle.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import shutil
import numpy as np
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.contrib.session_bundle import session_bundle
from tensorflow.core.example.example_pb2 import Example
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.parsing_ops # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
SAVED_MODEL_PATH = (
"python/saved_model/example/saved_model_half_plus_two/00000123")
SESSION_BUNDLE_PATH = "contrib/session_bundle/testdata/half_plus_two/00000123"
def _make_serialized_example(x):
  """Serialize a tf.Example holding a single float feature named 'x'."""
  example = Example()
  example.features.feature["x"].float_list.value.append(x)
  return example.SerializeToString()
class SessionBundleLoadTest(test.TestCase):
  """Tests loading a legacy session-bundle export and running its signatures."""

  def _checkRegressionSignature(self, signatures, sess):
    """Run serialized tf.Examples through the default regression signature."""
    default_signature = signatures.default_signature
    input_name = default_signature.regression_signature.input.tensor_name
    output_name = default_signature.regression_signature.output.tensor_name
    tf_example = [_make_serialized_example(x) for x in [0, 1, 2, 3]]
    y = sess.run([output_name], {input_name: tf_example})
    # The operation is y = 0.5 * x + 2
    self.assertEqual(y[0][0], 2)
    self.assertEqual(y[0][1], 2.5)
    self.assertEqual(y[0][2], 3)
    self.assertEqual(y[0][3], 3.5)

  def _checkNamedSignatures(self, signatures, sess):
    """Run raw arrays through the named 'inputs'/'outputs' signatures."""
    named_signatures = signatures.named_signatures
    input_name = (named_signatures["inputs"].generic_signature.map["x"]
                  .tensor_name)
    output_name = (named_signatures["outputs"].generic_signature.map["y"]
                   .tensor_name)
    y = sess.run([output_name], {input_name: np.array([[0], [1], [2], [3]])})
    # The operation is y = 0.5 * x + 2
    self.assertEqual(y[0][0], 2)
    self.assertEqual(y[0][1], 2.5)
    self.assertEqual(y[0][2], 3)
    self.assertEqual(y[0][3], 3.5)

  def testMaybeSessionBundleDir(self):
    """maybe_session_bundle_dir distinguishes bundles from other paths."""
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    self.assertTrue(session_bundle.maybe_session_bundle_dir(base_path))
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))
    base_path = "complete_garbage"
    self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))

  def testBasic(self):
    """Load a bundle and verify assets, signatures, and inference."""
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    ops.reset_default_graph()
    sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
        base_path,
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue(sess)
    # Asset-path tensors must resolve to files inside the bundle's
    # assets directory.
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
      path1, path2 = sess.run(["filename1:0", "filename2:0"])
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)

      # Exactly one Signatures proto is packed into the collection.
      collection_def = meta_graph_def.collection_def
      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      self.assertEquals(len(signatures_any), 1)
      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      self._checkRegressionSignature(signatures, sess)
      self._checkNamedSignatures(signatures, sess)

  def testBadPath(self):
    """Loading from a nonexistent path raises a clear RuntimeError."""
    base_path = test.test_src_dir_path("/no/such/a/dir")
    ops.reset_default_graph()
    with self.assertRaises(RuntimeError) as cm:
      _, _ = session_bundle.load_session_bundle_from_path(
          base_path,
          target="local",
          config=config_pb2.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue("Expected meta graph file missing" in str(cm.exception))

  def testVarCheckpointV2(self):
    """Same as testBasic, but against a V2-format variable checkpoint."""
    base_path = test.test_src_dir_path(
        "contrib/session_bundle/testdata/half_plus_two_ckpt_v2/00000123")
    ops.reset_default_graph()
    sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
        base_path,
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue(sess)
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
      path1, path2 = sess.run(["filename1:0", "filename2:0"])
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)

      collection_def = meta_graph_def.collection_def
      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      self.assertEquals(len(signatures_any), 1)
      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      self._checkRegressionSignature(signatures, sess)
      self._checkNamedSignatures(signatures, sess)
class SessionBundleLoadNoVarsTest(test.TestCase):
  """Test the case where there are no variables in the graph."""

  def setUp(self):
    self.base_path = os.path.join(test.get_temp_dir(), "no_vars")
    if not os.path.exists(self.base_path):
      os.mkdir(self.base_path)

    # Create a simple graph with a variable, then convert variables to
    # constants and export the graph.
    with ops.Graph().as_default() as g:
      x = array_ops.placeholder(dtypes.float32, name="x")
      w = variables.Variable(3.0)
      y = math_ops.subtract(w * x, 7.0, name="y")  # pylint: disable=unused-variable
      ops.add_to_collection("meta", "this is meta")
      with self.test_session(graph=g) as session:
        # Variables must be initialized before they can be frozen.
        variables.global_variables_initializer().run()
        new_graph_def = graph_util.convert_variables_to_constants(
            session, g.as_graph_def(), ["y"])
      filename = os.path.join(self.base_path, constants.META_GRAPH_DEF_FILENAME)
      saver.export_meta_graph(
          filename, graph_def=new_graph_def, collection_list=["meta"])

  def tearDown(self):
    shutil.rmtree(self.base_path)

  def testGraphWithoutVarsLoadsCorrectly(self):
    """The frozen graph loads and evaluates y = 3x - 7; metadata survives."""
    session, _ = session_bundle.load_session_bundle_from_path(self.base_path)
    got = session.run(["y:0"], {"x:0": 5.0})[0]
    self.assertEquals(got, 5.0 * 3.0 - 7.0)
    self.assertEquals(ops.get_collection("meta"), [b"this is meta"])
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
|
Weihonghao/ECM | refs/heads/master | Vpy34/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 4 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
  """Input fn returning the Boston housing data as (features, labels) tensors.

  Args:
    num_epochs: passed to tf.train.limit_epochs; None means unlimited.
  """
  boston = tf.contrib.learn.datasets.load_boston()
  features = tf.train.limit_epochs(
      tf.reshape(tf.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
      num_epochs=num_epochs)
  labels = tf.reshape(tf.constant(boston.target), [-1, 1])
  return features, labels
def iris_input_fn():
  """Input fn returning the Iris dataset as (features, labels) tensors."""
  dataset = tf.contrib.learn.datasets.load_iris()
  raw_features = tf.constant(dataset.data)
  features = tf.reshape(raw_features, [-1, _IRIS_INPUT_DIM])
  raw_labels = tf.constant(dataset.target)
  labels = tf.reshape(raw_labels, [-1])
  return features, labels
def iris_input_fn_labels_dict():
  """Iris input fn whose labels are wrapped in a dict under key 'labels'.

  Used to exercise the dict-labels code path of Estimator.
  """
  iris = tf.contrib.learn.datasets.load_iris()
  features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
  labels = {
      'labels': tf.reshape(tf.constant(iris.target), [-1])
  }
  return features, labels
def boston_eval_fn():
  """Eval input fn: the full Boston dataset concatenated with itself.

  Note the resulting batch contains every example twice (the dataset is
  stacked with itself along axis 0).
  """
  boston = tf.contrib.learn.datasets.load_boston()
  n_examples = len(boston.target)
  features = tf.reshape(
      tf.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
  labels = tf.reshape(tf.constant(boston.target), [n_examples, 1])
  # Pre-1.0 tf.concat signature: axis comes first.
  return tf.concat(0, [features, features]), tf.concat(0, [labels, labels])
def linear_model_params_fn(features, labels, mode, params):
  """Linear-regression model fn whose learning rate comes from `params`.

  Returns the (prediction, loss, train_op) tuple form of a model fn.
  """
  assert mode in (
      tf.contrib.learn.ModeKeys.TRAIN,
      tf.contrib.learn.ModeKeys.EVAL,
      tf.contrib.learn.ModeKeys.INFER)
  prediction, loss = (
      tf.contrib.learn.models.linear_regression_zero_init(features, labels)
  )
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
      learning_rate=params['learning_rate'])
  return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
  """Linear-regression model fn with a fixed 0.1 Adagrad learning rate.

  Returns the (prediction, loss, train_op) tuple form of a model fn.
  """
  assert mode in (
      tf.contrib.learn.ModeKeys.TRAIN,
      tf.contrib.learn.ModeKeys.EVAL,
      tf.contrib.learn.ModeKeys.INFER)
  prediction, loss = (
      tf.contrib.learn.models.linear_regression_zero_init(features, labels)
  )
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
      learning_rate=0.1)
  return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  assert mode in (
      tf.contrib.learn.ModeKeys.TRAIN,
      tf.contrib.learn.ModeKeys.EVAL,
      tf.contrib.learn.ModeKeys.INFER)
  prediction, loss = (
      tf.contrib.learn.models.linear_regression_zero_init(features, labels)
  )
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
      learning_rate=0.1)
  # Structured return type instead of the bare 3-tuple.
  return model_fn.ModelFnOps(mode=mode,
                             predictions=prediction,
                             loss=loss,
                             train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
  """3-class logistic-regression model fn that takes no `mode` argument.

  Accepts labels either directly or wrapped in a dict under key 'labels'.
  Predictions are a dict with the argmax 'class' and the raw 'prob'.
  """
  if isinstance(labels, dict):
    labels = labels['labels']
  labels = tf.one_hot(labels, 3, 1, 0)
  prediction, loss = (
      tf.contrib.learn.models.logistic_regression_zero_init(features, labels)
  )
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
      learning_rate=0.1)
  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
class CheckCallsMonitor(tf.contrib.learn.monitors.BaseMonitor):
  """Monitor asserting that step hooks each run exactly `expect_calls` times."""

  def __init__(self, expect_calls):
    super(CheckCallsMonitor, self).__init__()
    # None until begin() runs, proving begin() was actually invoked.
    self.begin_calls = None
    self.end_calls = None
    self.expect_calls = expect_calls

  def begin(self, max_steps):
    self.begin_calls = 0
    self.end_calls = 0

  def step_begin(self, step):
    self.begin_calls += 1
    # No extra tensors requested for this step.
    return {}

  def step_end(self, step, outputs):
    self.end_calls += 1
    # False: do not request training to stop.
    return False

  def end(self):
    # Verify at shutdown that both hooks fired the expected number of times.
    assert (self.end_calls == self.expect_calls and
            self.begin_calls == self.expect_calls)
class EstimatorTest(tf.test.TestCase):
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = tf.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = tf.contrib.learn.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = tf.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == tf.contrib.learn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = tf.contrib.learn.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = tf.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = tf.contrib.learn.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(boston_input_fn, num_epochs=1),
as_iterable=True)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = tf.get_default_graph().seed
return tf.constant([[1.]]), tf.constant([1.])
config = tf.contrib.learn.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testCheckInputs(self):
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn))
# Lambdas so we have to different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7., 8.], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7., 10.], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(ValueError,
'Either x or input_fn must be provided.',
est.fit, x=None, input_fn=None)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, x='X', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, y='Y', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and batch_size',
est.fit, input_fn=iris_input_fn, batch_size=100)
self.assertRaisesRegexp(
ValueError, 'Inputs cannot be tensors. Please provide input_fn.',
est.fit, x=tf.constant(1.))
def testUntrained(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn))
with self.assertRaises(tf.contrib.learn.NotFittedError):
_ = est.score(
x=boston.data,
y=boston.target.astype(np.float64))
with self.assertRaises(tf.contrib.learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = tf.contrib.learn.datasets.load_boston()
output_dir = tempfile.mkdtemp()
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_params_fn,
params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testBostonAll(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAll(self):
  """Fit/score/predict round-trip on Iris; checks 'class' and 'prob' outputs."""
  iris = tf.contrib.learn.datasets.load_iris()
  est = tf.contrib.learn.SKCompat(
      tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn))
  est.fit(iris.data, iris.target, steps=100)
  scores = est.score(
      x=iris.data,
      y=iris.target,
      metrics={('accuracy', 'class'): tf.contrib.metrics.streaming_accuracy})
  predictions = est.predict(x=iris.data)
  predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
  self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
  # Restricting outputs to ['class'] must agree with the full prediction dict
  self.assertAllClose(
      predictions['class'],
      predictions_class)
  # Predicted class must be the argmax of the probability vector
  self.assertAllClose(
      predictions['class'],
      np.argmax(predictions['prob'], axis=1))
  other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
  self.assertAllClose(scores['accuracy'], other_score)
  self.assertTrue('global_step' in scores)
  self.assertEqual(100, scores['global_step'])
def testIrisInputFn(self):
  """Fit/evaluate via input_fn, then predict from in-memory arrays."""
  iris = tf.contrib.learn.datasets.load_iris()
  est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
  est.fit(input_fn=iris_input_fn, steps=100)
  _ = est.evaluate(input_fn=iris_input_fn, steps=1)
  predictions = list(est.predict(x=iris.data))
  self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
  """Evaluates with labels supplied as a dict, using MetricSpec label_key."""
  iris = tf.contrib.learn.datasets.load_iris()
  est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
  est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
  _ = est.evaluate(
      input_fn=iris_input_fn_labels_dict,
      steps=1,
      metrics={
          'accuracy':
              metric_spec.MetricSpec(
                  metric_fn=tf.contrib.metrics.streaming_accuracy,
                  prediction_key='class',
                  label_key='labels')
      })
  predictions = list(est.predict(x=iris.data))
  self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIterator(self):
  """fit() should accept plain iterators for both x and y."""
  iris = tf.contrib.learn.datasets.load_iris()
  est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
  x_iter = itertools.islice(iris.data, 100)
  y_iter = itertools.islice(iris.target, 100)
  est.fit(x_iter, y_iter, steps=100)
  _ = est.evaluate(input_fn=iris_input_fn, steps=1)
  predictions = list(est.predict(x=iris.data))
  self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
  """fit() should accept a generator of per-example numpy arrays as labels."""
  iris = tf.contrib.learn.datasets.load_iris()
  est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
  x_iter = itertools.islice(iris.data, 100)
  y_iter = (np.array(x) for x in iris.target)
  est.fit(x_iter, y_iter, steps=100)
  _ = est.evaluate(input_fn=iris_input_fn, steps=1)
  _ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
  """fit() should accept a generator of plain Python ints as labels."""
  iris = tf.contrib.learn.datasets.load_iris()
  est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
  x_iter = itertools.islice(iris.data, 100)
  y_iter = (v for v in iris.target)
  est.fit(x_iter, y_iter, steps=100)
  _ = est.evaluate(input_fn=iris_input_fn, steps=1)
  _ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
  """fit() should tolerate an x iterator shorter than the requested steps."""
  iris = tf.contrib.learn.datasets.load_iris()
  est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
  x_iter = itertools.islice(iris.data, 50)
  y_iter = ([np.int32(v)] for v in iris.target)
  est.fit(x_iter, y_iter, steps=100)
def testTrainInputFn(self):
  """Basic fit/evaluate smoke test via input_fn."""
  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn, steps=1)
  _ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testTrainStepsIsIncremental(self):
  """`steps` adds to the existing global_step on successive fit() calls."""
  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn, steps=10)
  self.assertEqual(10, est.get_variable_value('global_step'))
  est.fit(input_fn=boston_input_fn, steps=15)
  self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
  """`max_steps` is an absolute ceiling, not an increment."""
  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn, max_steps=10)
  self.assertEqual(10, est.get_variable_value('global_step'))
  est.fit(input_fn=boston_input_fn, max_steps=15)
  self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
  """predict() from in-memory arrays yields one output per input row."""
  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
  boston = tf.contrib.learn.datasets.load_boston()
  est.fit(input_fn=boston_input_fn, steps=1)
  output = list(est.predict(x=boston.data, batch_size=10))
  self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFn(self):
  """predict() from a single-epoch input_fn yields one output per input row."""
  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
  boston = tf.contrib.learn.datasets.load_boston()
  est.fit(input_fn=boston_input_fn, steps=1)
  # num_epochs=1 makes the input_fn terminate so predict() ends cleanly
  input_fn = functools.partial(boston_input_fn, num_epochs=1)
  output = list(est.predict(input_fn=input_fn))
  self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
  """Test for model_fn that returns `ModelFnOps`."""
  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
  boston = tf.contrib.learn.datasets.load_boston()
  est.fit(input_fn=boston_input_fn, steps=1)
  input_fn = functools.partial(boston_input_fn, num_epochs=1)
  scores = est.evaluate(input_fn=input_fn, steps=1)
  self.assertIn('loss', scores.keys())
  output = list(est.predict(input_fn=input_fn))
  self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
  """fit() should reject features that don't match the first training run."""

  def other_input_fn():
    # Feature key 'other' deliberately differs from boston_input_fn's features
    return {'other': tf.constant([0, 0, 0])}, tf.constant([0, 0, 0])

  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn, steps=1)
  with self.assertRaises(ValueError):
    est.fit(input_fn=other_input_fn, steps=1)
def testMonitors(self):
  """Monitors must be invoked once per training step."""
  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn,
          steps=21,
          monitors=[CheckCallsMonitor(expect_calls=21)])
def testSummaryWriting(self):
  """Training should write an OptimizeLoss/loss summary event to model_dir."""
  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn, steps=200)
  est.evaluate(input_fn=boston_input_fn, steps=200)
  loss_summary = tf.contrib.testing.simple_values_from_events(
      tf.contrib.testing.latest_events(est.model_dir), ['OptimizeLoss/loss'])
  self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
  """The training loss must be registered in the GraphKeys.LOSSES collection."""

  class _LossCheckerHook(tf.train.SessionRunHook):

    def begin(self):
      # Snapshot the LOSSES collection once the graph has been built
      self.loss_collection = tf.get_collection(tf.GraphKeys.LOSSES)

  hook = _LossCheckerHook()
  est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
  self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
  """export() should pass through the directory name from the export module."""
  expected = '/path/to/some_dir'
  with tf.test.mock.patch.object(estimator, 'export') as mock_export_module:
    mock_export_module._export_estimator.return_value = expected
    est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
    actual = est.export('/path/to')
    self.assertEquals(expected, actual)
class InferRealValuedColumnsTest(tf.test.TestCase):
  """Tests for infer_real_valued_columns_from_input and ..._from_input_fn."""

  def testInvalidArgs(self):
    """None and Tensor inputs must be rejected by the array-based variant."""
    with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
      tf.contrib.learn.infer_real_valued_columns_from_input(None)

    with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
      tf.contrib.learn.infer_real_valued_columns_from_input(tf.constant(1.0))

  def _assert_single_feature_column(
      self, expected_shape, expected_dtype, feature_columns):
    # Helper: exactly one unnamed column with the given fixed-length config
    self.assertEqual(1, len(feature_columns))
    feature_column = feature_columns[0]
    self.assertEqual('', feature_column.name)
    self.assertEqual({
        '': tf.FixedLenFeature(shape=expected_shape, dtype=expected_dtype)
    }, feature_column.config)

  def testInt32Input(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
        np.ones(shape=[7, 8], dtype=np.int32))
    self._assert_single_feature_column([8], tf.int32, feature_columns)

  def testInt32InputFn(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        lambda: (tf.ones(shape=[7, 8], dtype=tf.int32), None))
    self._assert_single_feature_column([8], tf.int32, feature_columns)

  def testInt64Input(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
        np.ones(shape=[7, 8], dtype=np.int64))
    self._assert_single_feature_column([8], tf.int64, feature_columns)

  def testInt64InputFn(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        lambda: (tf.ones(shape=[7, 8], dtype=tf.int64), None))
    self._assert_single_feature_column([8], tf.int64, feature_columns)

  def testFloat32Input(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
        np.ones(shape=[7, 8], dtype=np.float32))
    self._assert_single_feature_column([8], tf.float32, feature_columns)

  def testFloat32InputFn(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        lambda: (tf.ones(shape=[7, 8], dtype=tf.float32), None))
    self._assert_single_feature_column([8], tf.float32, feature_columns)

  def testFloat64Input(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
        np.ones(shape=[7, 8], dtype=np.float64))
    self._assert_single_feature_column([8], tf.float64, feature_columns)

  def testFloat64InputFn(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        lambda: (tf.ones(shape=[7, 8], dtype=tf.float64), None))
    self._assert_single_feature_column([8], tf.float64, feature_columns)

  def testBoolInput(self):
    """Boolean inputs are unsupported and must raise."""
    with self.assertRaisesRegexp(
        ValueError, 'on integer or non floating types are not supported'):
      tf.contrib.learn.infer_real_valued_columns_from_input(
          np.array([[False for _ in xrange(8)] for _ in xrange(7)]))

  def testBoolInputFn(self):
    with self.assertRaisesRegexp(
        ValueError, 'on integer or non floating types are not supported'):
      # pylint: disable=g-long-lambda
      tf.contrib.learn.infer_real_valued_columns_from_input_fn(
          lambda: (tf.constant(False, shape=[7, 8], dtype=tf.bool), None))

  def testStringInput(self):
    """String inputs are unsupported and must raise."""
    with self.assertRaisesRegexp(
        ValueError, 'on integer or non floating types are not supported'):
      # pylint: disable=g-long-lambda
      tf.contrib.learn.infer_real_valued_columns_from_input(
          np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))

  def testStringInputFn(self):
    with self.assertRaisesRegexp(
        ValueError, 'on integer or non floating types are not supported'):
      # pylint: disable=g-long-lambda
      tf.contrib.learn.infer_real_valued_columns_from_input_fn(
          lambda: (
              tf.constant([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]),
              None))

  def testBostonInputFn(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        boston_input_fn)
    self._assert_single_feature_column(
        [_BOSTON_INPUT_DIM], tf.float64, feature_columns)

  def testIrisInputFn(self):
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        iris_input_fn)
    self._assert_single_feature_column(
        [_IRIS_INPUT_DIM], tf.float64, feature_columns)
class ReplicaDeviceSetterTest(tf.test.TestCase):
  """Tests device placement produced by estimator._get_replica_device_setter."""

  def testVariablesAreOnPs(self):
    """With a PS task in TF_CONFIG, variables go to /job:ps, ops to /job:worker."""
    tf_config = {'cluster': {tf.contrib.learn.TaskType.PS: ['fake_ps_0']}}
    with tf.test.mock.patch.dict('os.environ',
                                 {'TF_CONFIG': json.dumps(tf_config)}):
      config = tf.contrib.learn.RunConfig()

    with tf.device(estimator._get_replica_device_setter(config)):
      v = tf.Variable([1, 2])
      w = tf.Variable([2, 1])
      a = v + w
    self.assertDeviceEqual('/job:ps/task:0', v.device)
    self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
    self.assertDeviceEqual('/job:ps/task:0', w.device)
    self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
    self.assertDeviceEqual('/job:worker', a.device)

  def testVariablesAreLocal(self):
    """Without a cluster, nothing is pinned to any device."""
    with tf.device(estimator._get_replica_device_setter(
        tf.contrib.learn.RunConfig())):
      v = tf.Variable([1, 2])
      w = tf.Variable([2, 1])
      a = v + w
    self.assertDeviceEqual('', v.device)
    self.assertDeviceEqual('', v.initializer.device)
    self.assertDeviceEqual('', w.device)
    self.assertDeviceEqual('', w.initializer.device)
    self.assertDeviceEqual('', a.device)

  def testMutableHashTableIsOnPs(self):
    """MutableHashTable state (and lookups) are placed on the PS."""
    tf_config = {'cluster': {tf.contrib.learn.TaskType.PS: ['fake_ps_0']}}
    with tf.test.mock.patch.dict('os.environ',
                                 {'TF_CONFIG': json.dumps(tf_config)}):
      config = tf.contrib.learn.RunConfig()

    with tf.device(estimator._get_replica_device_setter(config)):
      default_val = tf.constant([-1, -1], tf.int64)
      table = tf.contrib.lookup.MutableHashTable(tf.string,
                                                 tf.int64,
                                                 default_val)
      input_string = tf.constant(['brain', 'salad', 'tank'])
      output = table.lookup(input_string)
    self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
    self.assertDeviceEqual('/job:ps/task:0', output.device)

  def testMutableHashTableIsLocal(self):
    with tf.device(estimator._get_replica_device_setter(
        tf.contrib.learn.RunConfig())):
      default_val = tf.constant([-1, -1], tf.int64)
      table = tf.contrib.lookup.MutableHashTable(tf.string,
                                                 tf.int64,
                                                 default_val)
      input_string = tf.constant(['brain', 'salad', 'tank'])
      output = table.lookup(input_string)
    self.assertDeviceEqual('', table._table_ref.device)
    self.assertDeviceEqual('', output.device)

  def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
    """Worker task index from TF_CONFIG must appear in op device strings."""
    tf_config = {
        'cluster': {
            tf.contrib.learn.TaskType.PS: ['fake_ps_0']
        },
        'task': {
            'type': tf.contrib.learn.TaskType.WORKER,
            'index': 3
        }
    }
    with tf.test.mock.patch.dict('os.environ',
                                 {'TF_CONFIG': json.dumps(tf_config)}):
      config = tf.contrib.learn.RunConfig()

    with tf.device(estimator._get_replica_device_setter(config)):
      v = tf.Variable([1, 2])
      w = tf.Variable([2, 1])
      a = v + w
    self.assertDeviceEqual('/job:ps/task:0', v.device)
    self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
    self.assertDeviceEqual('/job:ps/task:0', w.device)
    self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
    self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
  # Run all test cases defined in this module
  tf.test.main()
|
hackerunion/root | refs/heads/master | home/server/portal.old/hackerunion.org-master/petri/common/lib/twiliotools.py | 1 | import re
from django.conf import settings
from django.utils import simplejson as json
from twilio import util
from twilio.rest import TwilioRestClient
from petri.common.utils.debug import debug
class TwilioException(Exception):
    """Raised when an outgoing Twilio call or SMS request fails."""
    pass
def _get_client():
    # REST client authenticated with the account SID/auth token from settings
    return TwilioRestClient(settings.TWILIO_SID, settings.TWILIO_AUTH)
def _get_utils():
    # Validator used to check X-Twilio-Signature headers on inbound requests
    return util.RequestValidator(settings.TWILIO_AUTH)
def outgoing_call(number, callback):
    """Place a call from the configured Twilio caller to `number`.

    Args:
        number: destination phone number
        callback: URL Twilio fetches for call instructions

    Returns:
        False if the call ended as failed/busy/no-answer, True otherwise.

    Raises:
        TwilioException: if the Twilio REST request itself fails.
    """
    try:
        callObj = _get_client().calls.create(from_=settings.TWILIO_CALLER, to=number, url=callback)
    # "except ... as" (PEP 3110) replaces the Python-3-incompatible
    # "except Exception, e"; str(e) replaces the deprecated e.message
    except Exception as e:
        debug(str(e.__dict__), system="twilio-call")
        raise TwilioException(str(e))

    return callObj.status not in ['failed', 'busy', 'no-answer']
def outgoing_sms(number, body):
    """Send an SMS from the configured Twilio texter to `number`.

    Args:
        number: destination phone number
        body: message text

    Returns:
        False if Twilio reports the message as failed, True otherwise.

    Raises:
        TwilioException: if the Twilio REST request itself fails.
    """
    try:
        smsObj = _get_client().sms.messages.create(from_=settings.TWILIO_TEXTER, to=number, body=body)
    # "except ... as" (PEP 3110) replaces the Python-3-incompatible
    # "except Exception, e"; str(e) replaces the deprecated e.message
    except Exception as e:
        debug(str(e.__dict__), system="twilio-sms")
        raise TwilioException(str(e))

    return smsObj.status != 'failed'
def validate_request(request):
    """Validate the X-Twilio-Signature header of an inbound Django request.

    NOTE(review): the URL is rebuilt with a hard-coded http:// scheme, so
    requests served over https would fail validation -- confirm deployment
    is http-only.
    """
    return _get_utils().validate("http://%s%s" % (request.get_host(), request.get_full_path()),
                                 request.POST,
                                 request.META.get('HTTP_X_TWILIO_SIGNATURE', ''))
|
joergdietrich/astropy | refs/heads/master | astropy/units/si.py | 4 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the SI units. They are also available in the
`astropy.units` namespace.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..constants import si as _si
from .core import UnitBase, Unit, def_unit
import numpy as _numpy
# NOTE(review): def_unit injects each unit name into this module's namespace
# via namespace=_ns; names such as `um`, `nm` and `cm` used below are
# presumably the prefixed forms created by prefixes=True -- confirm against
# the def_unit documentation.
_ns = globals()

###########################################################################
# DIMENSIONLESS

def_unit(['percent', 'pct'], Unit(0.01), namespace=_ns, prefixes=False,
         doc="percent: one hundredth of unity, factor 0.01",
         format={'generic': '%', 'console': '%', 'cds': '%',
                 'latex': r'\%', 'unicode': '%'})

###########################################################################
# LENGTH

def_unit(['m', 'meter'], namespace=_ns, prefixes=True,
         doc="meter: base unit of length in SI")

def_unit(['micron'], um, namespace=_ns,
         doc="micron: alias for micrometer (um)",
         format={'latex': r'\mu m', 'unicode': 'μm'})

def_unit(['Angstrom', 'AA', 'angstrom'], 0.1 * nm, namespace=_ns,
         doc="ångström: 10 ** -10 m",
         format={'latex': r'\mathring{A}', 'unicode': 'Å',
                 'vounit': 'Angstrom'})

###########################################################################
# VOLUMES

def_unit((['l', 'L'], ['liter']), 1000 * cm ** 3.0, namespace=_ns, prefixes=True,
         format={'latex': r'\mathcal{l}', 'unicode': 'ℓ'},
         doc="liter: metric unit of volume")

###########################################################################
# ANGULAR MEASUREMENTS

def_unit(['rad', 'radian'], namespace=_ns, prefixes=True,
         doc="radian: angular measurement of the ratio between the length "
         "on an arc and its radius")
def_unit(['deg', 'degree'], _numpy.pi / 180.0 * rad, namespace=_ns,
         prefixes=True,
         doc="degree: angular measurement 1/360 of full rotation",
         format={'latex': r'{}^{\circ}', 'unicode': '°'})
def_unit(['hourangle'], 15.0 * deg, namespace=_ns, prefixes=False,
         doc="hour angle: angular measurement with 24 in a full circle",
         format={'latex': r'{}^{h}', 'unicode': 'ʰ'})
def_unit(['arcmin', 'arcminute'], 1.0 / 60.0 * deg, namespace=_ns,
         prefixes=True,
         doc="arc minute: angular measurement",
         format={'latex': r'{}^{\prime}', 'unicode': '′'})
def_unit(['arcsec', 'arcsecond'], 1.0 / 3600.0 * deg, namespace=_ns,
         prefixes=True,
         doc="arc second: angular measurement")
# These special formats should only be used for the non-prefix versions
arcsec._format = {'latex': r'{}^{\prime\prime}', 'unicode': '″'}
def_unit(['mas'], 0.001 * arcsec, namespace=_ns,
         doc="milli arc second: angular measurement")
def_unit(['uas'], 0.000001 * arcsec, namespace=_ns,
         doc="micro arc second: angular measurement",
         format={'latex': r'\mu as', 'unicode': 'μas'})

def_unit(['sr', 'steradian'], rad ** 2, namespace=_ns, prefixes=True,
         doc="steradian: unit of solid angle in SI")

###########################################################################
# TIME

def_unit(['s', 'second'], namespace=_ns, prefixes=True,
         exclude_prefixes=['a'],
         doc="second: base unit of time in SI.")

def_unit(['min', 'minute'], 60 * s, prefixes=True, namespace=_ns)
def_unit(['h', 'hour', 'hr'], 3600 * s, namespace=_ns, prefixes=True,
         exclude_prefixes=['p'])
def_unit(['d', 'day'], 24 * h, namespace=_ns, prefixes=True,
         exclude_prefixes=['c', 'y'])
def_unit(['sday'], 86164.09053 * s, namespace=_ns,
         doc="Sidereal day (sday) is the time of one rotation of the Earth.")
def_unit(['wk', 'week'], 7 * day, namespace=_ns)
def_unit(['fortnight'], 2 * wk, namespace=_ns)
def_unit(['a', 'annum'], 365.25 * d, namespace=_ns, prefixes=True,
         exclude_prefixes=['P'])
def_unit(['yr', 'year'], 365.25 * d, namespace=_ns, prefixes=True)

###########################################################################
# FREQUENCY

def_unit(['Hz', 'Hertz', 'hertz'], 1 / s, namespace=_ns, prefixes=True,
         doc="Frequency")

###########################################################################
# MASS

def_unit(['kg', 'kilogram'], namespace=_ns,
         doc="kilogram: base unit of mass in SI.")
def_unit(['g', 'gram'], 1.0e-3 * kg, namespace=_ns, prefixes=True,
         exclude_prefixes=['k', 'kilo'])
def_unit(['t', 'tonne'], 1000 * kg, namespace=_ns,
         doc="Metric tonne")

###########################################################################
# AMOUNT OF SUBSTANCE

def_unit(['mol', 'mole'], namespace=_ns, prefixes=True,
         doc="mole: amount of a chemical substance in SI.")

###########################################################################
# TEMPERATURE

def_unit(
    ['K', 'Kelvin'], namespace=_ns, prefixes=True,
    doc="Kelvin: temperature with a null point at absolute zero.")
def_unit(
    ['deg_C', 'Celsius'], namespace=_ns, doc='Degrees Celsius',
    format={'latex': r'{}^{\circ}C', 'unicode': '°C'})

###########################################################################
# FORCE

def_unit(['N', 'Newton', 'newton'], kg * m * s ** -2, namespace=_ns,
         prefixes=True, doc="Newton: force")

##########################################################################
# ENERGY

def_unit(['J', 'Joule', 'joule'], N * m, namespace=_ns, prefixes=True,
         doc="Joule: energy")
def_unit(['eV', 'electronvolt'], _si.e.value * J, namespace=_ns, prefixes=True,
         doc="Electron Volt")

##########################################################################
# PRESSURE

def_unit(['Pa', 'Pascal', 'pascal'], J * m ** -3, namespace=_ns, prefixes=True,
         doc="Pascal: pressure")
def_unit(['bar'], 1e5 * Pa, namespace=_ns,
         doc="bar: pressure")

###########################################################################
# POWER

def_unit(['W', 'Watt', 'watt'], J / s, namespace=_ns, prefixes=True,
         doc="Watt: power")

###########################################################################
# ELECTRICAL

def_unit(['A', 'ampere', 'amp'], namespace=_ns, prefixes=True,
         doc="ampere: base unit of electric current in SI")
def_unit(['C', 'coulomb'], A * s, namespace=_ns, prefixes=True,
         doc="coulomb: electric charge")
def_unit(['V', 'Volt', 'volt'], J * C ** -1, namespace=_ns, prefixes=True,
         doc="Volt: electric potential or electromotive force")
def_unit((['Ohm', 'ohm'], ['Ohm']), V * A ** -1, namespace=_ns, prefixes=True,
         doc="Ohm: electrical resistance",
         format={'latex': r'\Omega', 'unicode': 'Ω'})
def_unit(['S', 'Siemens', 'siemens'], A * V ** -1, namespace=_ns,
         prefixes=True, doc="Siemens: electrical conductance")
def_unit(['F', 'Farad', 'farad'], C * V ** -1, namespace=_ns, prefixes=True,
         doc="Farad: electrical capacitance")

###########################################################################
# MAGNETIC

def_unit(['Wb', 'Weber', 'weber'], V * s, namespace=_ns, prefixes=True,
         doc="Weber: magnetic flux")
def_unit(['T', 'Tesla', 'tesla'], Wb * m ** -2, namespace=_ns, prefixes=True,
         doc="Tesla: magnetic flux density")
def_unit(['H', 'Henry', 'henry'], Wb * A ** -1, namespace=_ns, prefixes=True,
         doc="Henry: inductance")

###########################################################################
# ILLUMINATION

def_unit(['cd', 'candela'], namespace=_ns, prefixes=True,
         doc="candela: base unit of luminous intensity in SI")
def_unit(['lm', 'lumen'], cd * sr, namespace=_ns, prefixes=True,
         doc="lumen: luminous flux")
def_unit(['lx', 'lux'], lm * m ** -2, namespace=_ns, prefixes=True,
         doc="lux: luminous emittence")

###########################################################################
# RADIOACTIVITY

def_unit(['Bq', 'becquerel'], Hz, namespace=_ns, prefixes=False,
         doc="becquerel: unit of radioactivity")
def_unit(['Ci', 'curie'], Bq * 3.7e10, namespace=_ns, prefixes=False,
         doc="curie: unit of radioactivity")

###########################################################################
# BASES

bases = set([m, s, kg, A, cd, rad, K, mol])

###########################################################################
# CLEANUP

# Remove helper names so they do not appear as units in this namespace
del UnitBase
del Unit
del def_unit

###########################################################################
# DOCSTRING

# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
    __doc__ += _generate_unit_summary(globals())
|
spiiph/owls-hep | refs/heads/master | owls_hep/region.py | 1 | """Provides models for regions
"""
# Future imports
from __future__ import print_function
# System imports
from copy import deepcopy
# owls-hep imports
from owls_hep.expression import multiplied
from owls_hep.variations import Variation
# Set up default exports
__all__ = [
'Region'
]
class Region(object):
    """Represents a region (a selection and weight) in which processes can be
    evaluated.
    """

    def __init__(self,
                 selection,
                 weight,
                 label,
                 sample_weights = None,
                 metadata = None):
        """Initializes a new instance of the Region class.

        Args:
            selection: A string representing selection for the region, or an
                empty string for no selection
            weight: A string representing the weight for the region, or an
                empty string for no weighting
            label: The ROOT TLatex label string to use when rendering the
                region (or a list/tuple of such strings)
            sample_weights: Weights to apply to the selection based on sample
                type. Should match the sample types of the processes, for
                example 'mc' and 'data'.
            metadata: A (pickleable) object containing optional metadata
        """
        # Store parameters.  None sentinels (instead of mutable default
        # arguments) ensure each instance gets its own dict; callers passing
        # nothing still see an empty dict, so behavior is backward-compatible.
        self._selection = selection
        self._weight = weight
        self._label = label
        self._sample_weights = {} if sample_weights is None else sample_weights
        self._metadata = {} if metadata is None else metadata
        self._weighted = True

        # Create initial variations container
        self._variations = ()

    def __hash__(self):
        """Returns a hash for the state of the region."""
        # Only hash those parameters which affect evaluation
        return hash(self.state())

    def state(self):
        """Returns the (hashable) state of the region."""
        # Only include those parameters which affect evaluation
        # TODO: Switch to using self._selection_weight() and add sample_type
        # as an input parameter (if possible)
        # NOTE: items() (not the Python-2-only iteritems()) keeps this
        # working under both Python 2 and 3
        return (
            self._selection,
            self._weight,
            self._weighted,
            self._variations,
            tuple(sorted(self._sample_weights.items())),
        )

    def __str__(self):
        """Returns the string representation of the region."""
        return 'Region({0}, {1})'.format(self._label, self.state())

    def label(self):
        """Returns the label(s) for the region as a list (or the original
        list/tuple when one was provided).
        """
        if isinstance(self._label, (list, tuple)):
            return self._label
        return [self._label]

    def metadata(self):
        """Returns metadata for this region, if any."""
        return self._metadata

    def varied(self, variations, label = None):
        """Creates a copy of the region with the specified variation applied.

        Args:
            variations: The variation(s) to apply -- a single Variation or a
                tuple of Variation objects
            label: An optional new label for the varied region

        Returns:
            A duplicate region, but with the specified variation applied.

        Raises:
            TypeError: if any supplied variation is not a Variation instance
        """
        # Create the copy
        result = deepcopy(self)

        # Normalize to a tuple and validate every entry up front.
        # NOTE: It's useful to check that each variation actually is an
        # instance of Variation; mistakes here are particularly hard to
        # decode later.
        if not isinstance(variations, tuple):
            variations = (variations,)
        for v in variations:
            if not isinstance(v, Variation):
                raise TypeError('{0} is not a subclass of Variation'.format(v))

        # Append the variations
        result._variations += variations

        if label is not None:
            result._label = label

        return result

    def selection(self):
        """Returns the selection string with all variations applied."""
        selection = self._selection

        # Apply any variations (the weight component is ignored here)
        for v in self._variations:
            selection, _ = v(selection, '')

        return selection

    def selection_weight(self, sample_type):
        """Returns a string of "selection * weight" with all variations
        applied.

        Args:
            sample_type: The sample-type key (e.g. 'mc' or 'data') used to
                look up an additional per-sample weight
        """
        selection = self._selection

        # Combine the region weight with the per-sample weight when one is
        # registered for this sample type.  Only a missing key falls back to
        # the bare weight; other errors now propagate instead of being hidden
        # by a bare except.
        try:
            weight = multiplied(self._weight,
                                self._sample_weights[sample_type])
        except KeyError:
            weight = self._weight

        # Apply any variations
        for v in self._variations:
            selection, weight = v(selection, weight)

        # Return the product of the selection and weight expressions
        return multiplied(selection, weight)
|
kostya-sh/FrameworkBenchmarks | refs/heads/master | toolset/benchmark/__init__.py | 90 | # Benchmark
|
qk4l/Flexget | refs/heads/develop | flexget/tests/test_series_premiere.py | 5 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from jinja2 import Template
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'])
def config(request):
    """Override and parametrize default config fixture for all series tests."""
    # Render the class-level Jinja config once per parser backend
    return Template(request.cls.config).render({'parser': request.param})
class TestSeriesPremiere(object):
    """End-to-end tests for the series_premiere plugin."""
    # NOTE(review): the YAML indentation below was reconstructed from a
    # whitespace-mangled source; the key/value content is unchanged.
    config = """
        templates:
          global:
            parsing:
              series: {{parser}}
            disable: [seen] # just cleans log a bit ..
        tasks:
          test_only_one:
            mock:
              - title: Foo's.&.Bar's.2009.S01E01.HDTV.XviD-2HD[FlexGet]
              - title: Foos and Bars 2009 S01E01 HDTV XviD-2HD[ASDF]
              - title: Foo's & Bars (2009) S01E01 720p XviD-2HD[AOEU]
              - title: Foos&bars-2009-S01E01 1080p x264
              - title: Foos and Bars 2009 S01E02 HDTV Xvid-2HD[AOEU]
            series_premiere: yes
          test_dupes_across_tasks_1:
            mock:
              - {title: 'Foo.Bar.2009.S01E01.HDTV.XviD-2HD[FlexGet]'}
            series_premiere: yes
          test_dupes_across_tasks_2:
            mock:
              - {title: 'foo bar (2009) s01e01 dsr xvid-2hd[dmg]'}
            series_premiere: yes
          test_path_set:
            mock:
              - {title: 'foo bar s01e01 hdtv'}
            series_premiere:
              path: .
          test_pilot_and_premiere:
            mock:
              - {title: 'foo bar s01e00 hdtv'}
              - {title: 'foo bar s01e01 hdtv'}
            series_premiere: yes
          test_no_teasers:
            mock:
              - {title: 'foo bar s01e00 hdtv'}
              - {title: 'foo bar s01e01 hdtv'}
            series_premiere:
              allow_teasers: no
          test_multi_episode:
            mock:
              - {title: 'foo bar s01e01e02 hdtv'}
            series_premiere: yes
          test_rerun:
            mock:
              - title: theshow s01e01
              - title: theshow s01e02
            series_premiere: yes
            rerun: 1
          test_no_rerun_with_series:
            mock:
              - title: theshow s01e01
              - title: theshow s01e02
            series_premiere: yes
            series:
              - theshow
            rerun: 0
          test_no_rerun:
            mock:
              - title: theshow s01e01
              - title: theshow s01e02
            series_premiere: yes
            rerun: 0
          test_no_configured_1:
            series:
              - explicit show
          test_no_configured_2:
            series_premiere: yes
            mock:
              - title: explicit show s01e01
              - title: other show s01e01
    """

    def test_only_one(self, execute_task):
        """Only the premiere episode of a new series is accepted."""
        task = execute_task('test_only_one')
        assert len(task.accepted) == 1, 'should only have accepted one'
        assert not task.find_entry('accepted', title='Foos and Bars 2009 S01E02 HDTV Xvid-2HD[AOEU]'), \
            'Non premiere accepted'

    def test_dupes_across_tasks(self, execute_task):
        """The same premiere must not be accepted again by a different task."""
        task = execute_task('test_dupes_across_tasks_1')
        assert len(task.accepted) == 1, 'didn\'t accept first premiere'
        task = execute_task('test_dupes_across_tasks_2')
        assert len(task.accepted) == 0, 'accepted duplicate premiere'

    def test_path_set(self, execute_task):
        """The plugin's `path` option is propagated to accepted entries."""
        task = execute_task('test_path_set')
        assert task.find_entry(title='foo bar s01e01 hdtv', path='.')

    def test_pilot_and_premiere(self, execute_task):
        """Both the e00 teaser and the e01 premiere are accepted by default."""
        task = execute_task('test_pilot_and_premiere')
        assert len(task.accepted) == 2, 'should have accepted pilot and premiere'

    def test_no_teasers(self, execute_task):
        """With allow_teasers: no, the e00 teaser is rejected."""
        task = execute_task('test_no_teasers')
        assert len(task.accepted) == 1, 'should have accepted only premiere'
        assert not task.find_entry('accepted', title='foo bar s01e00 hdtv')

    def test_multi_episode(self, execute_task):
        """A multi-episode release containing e01 counts as a premiere."""
        task = execute_task('test_multi_episode')
        assert len(task.accepted) == 1, 'should have accepted multi-episode premiere'

    def test_rerun(self, execute_task):
        """A rerun must not pick up later episodes of an accepted premiere."""
        task = execute_task('test_rerun')
        assert not task.find_entry('accepted', title='theshow s01e02'), 'accepted non-premiere'

    def test_no_rerun_with_series(self, execute_task):
        """An explicitly configured series still accepts later episodes."""
        task = execute_task('test_no_rerun_with_series')
        assert task.find_entry('accepted', title='theshow s01e02'), 'should be accepted by series'

    def test_no_rerun(self, execute_task):
        task = execute_task('test_no_rerun')
        assert not task.find_entry('accepted', title='theshow s01e02'), 'accepted non-premiere'

    def test_no_configured_shows(self, execute_task):
        """Shows configured via the series plugin are excluded from premieres."""
        task = execute_task('test_no_configured_1')
        task = execute_task('test_no_configured_2')
        entry = task.find_entry(title='explicit show s01e01')
        assert not entry.accepted
        entry = task.find_entry(title='other show s01e01')
        assert entry.accepted
|
johnloucaides/chipsec | refs/heads/master | chipsec/xmlout.py | 1 | #!/usr/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
import time
import sys
import traceback
import os
from os.path import basename
import xml.etree.ElementTree as ET
import platform
import xml.dom.minidom
class xmlAux:
    """Accumulates per-module test results and writes them as JUnit-style XML.

    Modules report results through the *_check methods; saveXML() serializes
    everything collected so far to the configured file.

    Fixes: `is not None` / truthiness instead of `!= None` / `== True`
    comparisons, narrowed bare except, and py2/py3-compatible print calls.
    """

    def __init__(self):
        """The Constructor."""
        self.test_cases = []      # finished xmlTestCase objects
        self.useXML = False       # only collect results when an XML file was requested
        self.testCase = None      # test case currently in progress
        self.class_name = None    # module name used as the JUnit classname
        self.xmlFile = None
        self.xmlStdout = ""       # stdout captured for the current test case
        self.xmlStderr = None
        self.properties = []      # <property> entries for the <testsuite>

    def add_test_suite_property(self, name, value):
        """Adds a <property> child node to the <testsuite>."""
        if name is not None and value is not None:
            self.properties.append( ts_property( str(name).strip(), str(value).strip() ) )

    def set_xml_file(self, name):
        """Sets the filename used for the XML output (enables XML collection)."""
        if name is not None:
            self.useXML = True
            self.xmlFile = name

    def append_stdout(self, msg):
        """Appends one line of captured stdout for the current test case."""
        self.xmlStdout += str(msg) + "\n"

    def _check_testCase_exist(self):
        # Lazily create a test case if a status arrives before start_test().
        if self.testCase is None:
            if self.class_name is None:
                self.testCase = xmlTestCase( "test name", "class.name" )
            else:
                self.testCase = xmlTestCase( self.class_name, self.class_name )

    def _end_test(self):
        # Finalize the running test case and queue it for the suite.
        try:
            self.testCase.set_time()
            self.testCase.add_stdout_info( self.xmlStdout )
            self.test_cases.append( self.testCase )
            self.testCase = None
        except Exception:
            # Single pre-formatted argument keeps this print valid on py2 and py3.
            print("Unexpected error: %s" % str(sys.exc_info()[0]))
            raise

    def passed_check(self):
        """Used when you want to mark a testcase as PASS and add it to the testsuite."""
        if self.useXML:
            self._check_testCase_exist()
            self._end_test()

    def failed_check(self, text):
        """Used when you want to mark a testcase as FAILURE and add it to the testsuite."""
        if self.useXML:
            self._check_testCase_exist()
            self.testCase.add_failure_info( text, None )
            self._end_test()

    def error_check(self, text):
        """Used when you want to mark a testcase as ERROR and add it to the testsuite."""
        if self.useXML:
            self._check_testCase_exist()
            self.testCase.add_error_info( text, None )
            self._end_test()

    def skipped_check(self, text):
        """Used when you want to mark a testcase as SKIPPED and add it to the testsuite."""
        if self.useXML:
            self._check_testCase_exist()
            self.testCase.add_skipped_info( text, None )
            self._end_test()

    def information_check(self, text):
        """Used when you want to mark a testcase as INFORMATION and add it to the testsuite."""
        if self.useXML:
            self._check_testCase_exist()
            self.testCase.add_information_info( text, None )
            self._end_test()

    def start_test(self, test_name):
        """Starts the test/testcase, resetting the captured stdout."""
        self.xmlStdout = ""
        if self.useXML:
            self.testCase = xmlTestCase( test_name, self.class_name )

    def start_module(self, module_name):
        """Logs the start point of a Test, this is used for XML output.

        If XML file was not specified, it will just display a banner for the test name.
        """
        if self.useXML:
            self.class_name = module_name
            if self.testCase is not None:
                # A test that never reported a status is recorded as passed.
                self.passed_check()
            self.xmlStdout = ""

    def end_module(self, module_name):
        """Logs the end point of a Test; flushes any test case still open."""
        if self.useXML:
            self.class_name = ""
            if self.testCase is not None:
                # A test that never reported a status is recorded as passed.
                self.passed_check()
            self.xmlStdout = ""

    def saveXML(self):
        """Saves the XML info to a file in a JUnit style.

        Returns True on success, False when XML output is disabled.
        """
        if not self.useXML or self.xmlFile is None:
            return False
        filename = self.xmlFile.replace("'", "")
        ts = xmlTestSuite( basename( os.path.splitext(filename)[0] ) )
        ts.test_cases = self.test_cases
        if self.properties:
            ts.properties = self.properties
        ts.to_file( filename )
        print("[CHIPSEC] Saving output to XML file: %s" % str( os.path.abspath( filename ) ))
        return True
class testCaseType:
    """Enumeration of result categories a test case can carry.

    (FAILURE, ERROR, SKIPPED, INFORMATION, PASS) map to 1..5.
    """
    (FAILURE,
     ERROR,
     SKIPPED,
     INFORMATION,
     PASS) = range(1, 6)
class xmlTestCase():
    """Represents a JUnit test case with a result and possibly some stdout or stderr.

    Fix: the is_* predicates previously ended with a bare ``False`` expression
    (a no-op statement), so the negative case returned None; they now return
    real booleans. None and False are both falsy, so callers are unaffected.
    """

    def __init__(self, name, classname, pTime=None, stdout=None, stderr=None, tcType=None, message=None, output=None):
        """The Constructor.

        name      -- test case name
        classname -- JUnit classname the case belongs to
        pTime     -- pre-computed duration in seconds; otherwise set_time() measures it
        tcType    -- one of the testCaseType values; anything unrecognized becomes PASS
        """
        self.name = name
        self.time = None
        self.startTime = time.time()
        self.endTime = None
        if pTime is not None:
            self.time = pTime
        self.stdout = stdout
        self.stderr = stderr
        self.classname = classname
        self.tcType = tcType
        self.tcMessage = message
        self.tcOutput = output
        # Per-type message/output mirrors, kept for compatibility with the
        # junit_xml package's attribute names.
        self.error_message = ""
        self.error_output = ""
        self.failure_message = ""
        self.failure_output = ""
        self.skipped_message = ""
        self.skipped_output = ""
        self.information_message = ""
        self.information_output = ""
        if tcType == testCaseType.ERROR:
            self.error_message = message
            self.error_output = output
        elif tcType == testCaseType.FAILURE:
            self.failure_message = message
            self.failure_output = output
        elif tcType == testCaseType.SKIPPED:
            self.skipped_message = message
            self.skipped_output = output
        elif tcType == testCaseType.INFORMATION:
            self.information_message = message
            self.information_output = output
        else:
            # Then it should be PASSED.
            self.tcType = testCaseType.PASS

    def is_skipped(self):
        """Returns True if the testCase is of type SKIPPED, otherwise False."""
        return self.tcType == testCaseType.SKIPPED

    def is_error(self):
        """Returns True if the testCase is of type ERROR, otherwise False."""
        return self.tcType == testCaseType.ERROR

    def is_failure(self):
        """Returns True if the testCase is of type FAILURE, otherwise False."""
        return self.tcType == testCaseType.FAILURE

    def is_pass(self):
        """Returns True if the testCase is of type PASS, otherwise False."""
        # PASS is simply "none of the other result types".
        return self.tcType not in (testCaseType.ERROR, testCaseType.FAILURE,
                                   testCaseType.SKIPPED, testCaseType.INFORMATION)

    def is_information(self):
        """Returns True if the testCase is of type INFORMATION, otherwise False."""
        return self.tcType == testCaseType.INFORMATION

    def add_failure_info(self, message=None, output=None):
        """Marks the test case as FAILURE and records its message/output."""
        self.tcType = testCaseType.FAILURE
        self.tcMessage = message
        self.tcOutput = output
        # To be compatible with junit_xml
        self.failure_message = message
        self.failure_output = output

    def add_error_info(self, message=None, output=None):
        """Marks the test case as ERROR and records its message/output."""
        self.tcType = testCaseType.ERROR
        self.tcMessage = message
        self.tcOutput = output
        # To be compatible with junit_xml
        self.error_message = message
        self.error_output = output

    def add_skipped_info(self, message=None, output=None):
        """Marks the test case as SKIPPED and records its message/output."""
        self.tcType = testCaseType.SKIPPED
        self.tcMessage = message
        self.tcOutput = output
        # To be compatible with junit_xml
        self.skipped_message = message
        self.skipped_output = output

    def add_information_info(self, message=None, output=None):
        """Marks the test case as INFORMATION and records its message/output."""
        self.tcType = testCaseType.INFORMATION
        self.tcMessage = message
        self.tcOutput = output
        # To be compatible with junit_xml
        self.information_message = message
        self.information_output = output

    def add_stdout_info(self, text):
        """Appends text to the captured stdout for the TestCase."""
        if self.stdout is not None:
            self.stdout += str(text)
        else:
            self.stdout = str(text)

    def add_stderr_info(self, text):
        """Appends text to the captured stderr for the TestCase."""
        if self.stderr is not None:
            self.stderr += str(text)
        else:
            self.stderr = str(text)

    def set_time(self, pTime=None):
        """Sets the duration: explicit pTime, or elapsed time since construction."""
        if pTime is not None:
            self.time = pTime
        else:
            self.endTime = time.time()
            self.time = self.endTime - self.startTime
class xmlTestSuite(object):
    """Suite of test cases; the parent node for the generated <testcase> elements."""

    def __init__(self, name, test_cases=None, hostname=None, ts_id=None, package=None, timestamp=None, properties=None):
        """Store the suite metadata and the (possibly empty) list of test cases."""
        self.name = name
        # Falsy input (None/empty) becomes a fresh list; a caller's list is
        # kept by reference, not copied.
        self.test_cases = test_cases or []
        self.hostname = hostname
        self.ts_id = ts_id
        self.package = package
        self.timestamp = timestamp
        self.properties = properties

    def to_xml_string(self):
        """Return the JUnit XML document as a string."""
        try:
            iter( self.test_cases )
        except TypeError:
            raise Exception('test_suite has no test cases')
        suite = TestSuite(self.name, self.test_cases,
                          self.hostname, self.ts_id, self.package,
                          self.timestamp, self.properties)
        return suite.to_xml_string()

    def to_file(self, file_name):
        """Write the JUnit XML document to *file_name*."""
        with open( file_name, 'wb') as out:
            out.write( self.to_xml_string() )
class ts_property(object):
    """A single name/value property attached to the <testsuite> element."""

    def __init__(self, name, value):
        """Record the property's *name* and *value* verbatim."""
        self.value = value
        self.name = name
class TestSuite(object):
    """Internal JUnit-style serializer: builds the <testsuite> element tree.

    Fixes: the INFORMATION branch assigned the output text to a throwaway
    local instead of the element's text (so it never reached the XML); the
    bare ``except:`` in __init__ is narrowed to TypeError.
    """

    def __init__(self, name, test_cases, hostname, ts_id, package, timestamp, properties):
        """Store suite metadata; falsy test_cases/properties become empty lists."""
        self.name = name
        if not test_cases:
            test_cases = []
        try:
            iter( test_cases )
        except TypeError:
            # Keep the original permissive behavior: a non-iterable value is
            # stored as-is and only fails later, inside build_xml().
            pass
        self.test_cases = test_cases
        self.hostname = hostname
        self.ts_id = ts_id
        self.package = package
        self.timestamp = timestamp
        if not properties:
            self.properties = []
        else:
            self.properties = properties

    def build_xml(self):
        """Builds and returns the <testsuite> Element with all its children."""
        ts_attributes = dict()
        if self.name:
            ts_attributes["name"] = str( self.name )
        else:
            ts_attributes["name"] = "name"
        if self.hostname:
            ts_attributes["hostname"] = str( self.hostname )
        if self.ts_id:
            ts_attributes["id"] = str( self.ts_id )
        if self.package:
            ts_attributes["package"] = str( self.package )
        if self.timestamp:
            ts_attributes["timestamp"] = str( self.timestamp )
        # Per-category counts over the collected test cases.
        ts_attributes['failures'] = str( len( [tc for tc in self.test_cases if tc.is_failure()] ) )
        ts_attributes['errors'] = str( len( [tc for tc in self.test_cases if tc.is_error()] ) )
        ts_attributes['skipped'] = str( len( [tc for tc in self.test_cases if tc.is_skipped()] ) )
        ts_attributes['information'] = str( len( [tc for tc in self.test_cases if tc.is_information()] ) )
        ts_attributes["time"] = "%.5f" % sum( [tc.time for tc in self.test_cases if tc.time] )
        ts_attributes["tests"] = str( len( self.test_cases ) )

        xml_element = ET.Element( "testsuite", ts_attributes )
        if len(self.properties) > 0:
            ps_element = ET.SubElement( xml_element, "properties" )
            for p in self.properties:
                # A fresh dict per property; the original reused one dict
                # across iterations, which was needlessly fragile.
                ET.SubElement( ps_element, "property", {"name": p.name, "value": p.value} )

        for tc in self.test_cases:
            tc_attributes = dict()
            tc_attributes['name'] = str( tc.name )
            if tc.time:
                tc_attributes['time'] = "%.5f" % tc.time
            if tc.classname:
                tc_attributes['classname'] = str( tc.classname )
            tc_element = ET.SubElement( xml_element, "testcase", tc_attributes )
            # For the is_pass() case, just log a 'pass' tag.
            if tc.is_pass():
                ET.SubElement( tc_element, "pass", {'type': 'pass'} )
            elif tc.is_information():
                information_element = ET.SubElement( tc_element, "information", {'type': 'information'} )
                if tc.information_message:
                    information_element.set('message', tc.information_message)
                if tc.information_output:
                    # Fix: previously assigned to a local variable and never
                    # attached to the element.
                    information_element.text = tc.information_output
            elif tc.is_failure():
                failure_element = ET.SubElement( tc_element, "failure", {'type': 'failure'} )
                if tc.failure_message:
                    failure_element.set( 'message', tc.failure_message )
                if tc.failure_output:
                    failure_element.text = tc.failure_output
            elif tc.is_error():
                error_element = ET.SubElement( tc_element, "error", {'type': 'error'} )
                if tc.error_message:
                    error_element.set( 'message', tc.error_message )
                if tc.error_output:
                    error_element.text = tc.error_output
            elif tc.is_skipped():
                skipped_element = ET.SubElement( tc_element, "skipped", {'type': 'skipped'} )
                if tc.skipped_message:
                    skipped_element.set( 'message', tc.skipped_message )
                if tc.skipped_output:
                    skipped_element.text = tc.skipped_output
            # system-out and system-err are common for all, so here we go.
            if tc.stdout:
                stdout_element = ET.SubElement( tc_element, "system-out" )
                stdout_element.text = tc.stdout
            if tc.stderr:
                stderr_element = ET.SubElement( tc_element, "system-err" )
                stderr_element.text = tc.stderr
        return xml_element

    def to_xml_string(self):
        """Returns the XML tree as a string, wrapped in a <testsuites> root."""
        xml_element = ET.Element("testsuites")
        xml_element.append( self.build_xml() )
        xml_string = ET.tostring( xml_element, None, None )
        # Pretty-print where minidom output is known to behave consistently.
        if platform.system().lower() in ["windows", "linux"]:
            xml_string = xml.dom.minidom.parseString(xml_string).toprettyxml()
        return xml_string
|
Pablo126/SSBW | refs/heads/master | Entrega1/lib/python3.5/site-packages/django/db/backends/oracle/compiler.py | 46 | from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
    """Oracle-specific SQL compiler that emulates LIMIT/OFFSET via ROWNUM."""

    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Creates the SQL for this query. Returns the SQL string and list
        of parameters. This is overridden from the original Query class
        to handle the additional SQL Oracle requires to emulate LIMIT
        and OFFSET.

        If 'with_limits' is False, any limit/offset information is not
        included in the query.
        """
        # The `do_offset` flag indicates whether we need to construct
        # the SQL needed to use limit/offset with Oracle.
        do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
        if not do_offset:
            sql, params = super(SQLCompiler, self).as_sql(
                with_limits=False,
                with_col_aliases=with_col_aliases,
            )
        else:
            # Column aliases are always requested for the wrapped subquery.
            sql, params = super(SQLCompiler, self).as_sql(
                with_limits=False,
                with_col_aliases=True,
            )
            # Wrap the base query in an outer SELECT * with boundaries on
            # the "_RN" column. This is the canonical way to emulate LIMIT
            # and OFFSET on Oracle.
            high_where = ''
            if self.query.high_mark is not None:
                high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
            if self.query.low_mark:
                sql = (
                    'SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (%s) '
                    '"_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
                )
            else:
                # Simplify the query to support subqueries if there's no offset.
                sql = (
                    'SELECT * FROM (SELECT "_SUB".* FROM (%s) "_SUB" %s)' % (sql, high_where)
                )
        return sql, params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
    """INSERT statements need no Oracle-specific handling beyond the bases."""
    pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
    """DELETE statements need no Oracle-specific handling beyond the bases."""
    pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
    """UPDATE statements need no Oracle-specific handling beyond the bases."""
    pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
    """Aggregate queries need no Oracle-specific handling beyond the bases."""
    pass
|
EthanBlackburn/sync-engine | refs/heads/master | migrations/versions/050_imap_table_cleanups.py | 1 | """imap table cleanups
Revision ID: 29217fad3f46
Revises: 161b88c17615
Create Date: 2014-07-01 18:56:55.962529
"""
# revision identifiers, used by Alembic.
revision = '29217fad3f46'
down_revision = '1b751e8d9cac'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Rename the imap sync tables and migrate folder_name fields to folder_id FKs.

    - foldersync    -> imapfoldersyncstatus (adds folder_id, _sync_status -> _metrics)
    - uidvalidity   -> imapfolderinfo       (adds folder_id, renames columns)
    - imapuid       : imapaccount_id -> account_id
    EAS tables get the same treatment when they exist in this deployment.
    """
    from inbox.models.session import session_scope
    from inbox.models.folder import Folder
    from inbox.sqlalchemy_ext.util import JSON
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)

    ### foldersync => imapfoldersyncstatus
    # note that renaming a table does in fact migrate constraints + indexes too
    op.rename_table('foldersync', 'imapfoldersyncstatus')

    op.alter_column('imapfoldersyncstatus', '_sync_status',
                    existing_type=JSON(), nullable=True,
                    new_column_name='_metrics')

    op.add_column('imapfoldersyncstatus',
                  sa.Column('folder_id', sa.Integer(), nullable=False))

    ### uidvalidity => imapfolderinfo
    op.rename_table('uidvalidity', 'imapfolderinfo')

    op.alter_column('imapfolderinfo', 'uid_validity',
                    existing_type=sa.Integer(), nullable=False,
                    new_column_name='uidvalidity')
    op.alter_column('imapfolderinfo', 'highestmodseq',
                    existing_type=sa.Integer(), nullable=True)

    op.drop_constraint('imapfolderinfo_ibfk_1',
                       'imapfolderinfo', type_='foreignkey')
    op.alter_column('imapfolderinfo', 'imapaccount_id',
                    existing_type=sa.Integer(), nullable=False,
                    new_column_name='account_id')
    op.create_foreign_key('imapfolderinfo_ibfk_1',
                          'imapfolderinfo', 'imapaccount',
                          ['account_id'], ['id'])

    op.add_column('imapfolderinfo',
                  sa.Column('folder_id', sa.Integer(), nullable=False))

    ### imapuid
    op.drop_constraint('imapuid_ibfk_1', 'imapuid', type_='foreignkey')
    op.alter_column('imapuid', 'imapaccount_id',
                    existing_type=sa.Integer(), nullable=False,
                    new_column_name='account_id')
    op.create_foreign_key('imapuid_ibfk_1',
                          'imapuid', 'imapaccount', ['account_id'], ['id'])

    ### migrate data and add new constraints
    Base = sa.ext.declarative.declarative_base()
    Base.metadata.reflect(engine)

    # EAS tables only exist on some deployments; handle them conditionally.
    if 'easfoldersync' in Base.metadata.tables:
        op.rename_table('easfoldersync', 'easfoldersyncstatus')
        op.add_column('easfoldersyncstatus',
                      sa.Column('folder_id', sa.Integer(), nullable=False))
        op.alter_column('easfoldersyncstatus', '_sync_status',
                        existing_type=JSON(), nullable=True,
                        new_column_name='_metrics')
        Base.metadata.reflect(engine)

        class EASFolderSyncStatus(Base):
            __table__ = Base.metadata.tables['easfoldersyncstatus']

    class ImapFolderSyncStatus(Base):
        __table__ = Base.metadata.tables['imapfoldersyncstatus']

    class ImapFolderInfo(Base):
        __table__ = Base.metadata.tables['imapfolderinfo']

    with session_scope(versioned=False, ignore_soft_deletes=False) \
            as db_session:
        # (account_id, lowercased folder name) -> folder id, used to backfill
        # the new folder_id columns from the legacy folder_name values.
        folder_id_for = dict([((account_id, name.lower()), id_)
                              for id_, account_id, name in
                              db_session.query(Folder.id, Folder.account_id,
                                               Folder.name)])
        for status in db_session.query(ImapFolderSyncStatus):
            print "migrating", status.folder_name
            status.folder_id = folder_id_for[
                (status.account_id, status.folder_name.lower())]
        db_session.commit()

        if 'easfoldersyncstatus' in Base.metadata.tables:
            for status in db_session.query(EASFolderSyncStatus):
                print "migrating", status.folder_name
                folder_id = folder_id_for.get(
                    (status.account_id, status.folder_name.lower()))
                if folder_id is not None:
                    status.folder_id = folder_id
                else:
                    # EAS folder rows *may* not exist if have no messages
                    folder = Folder(account_id=status.account_id, name=status.folder_name)
                    db_session.add(folder)
                    db_session.commit()
                    status.folder_id = folder.id
            db_session.commit()
            # some weird alembic bug? need to drop and recreate this FK
            op.drop_constraint('easfoldersyncstatus_ibfk_1',
                               'easfoldersyncstatus', type_='foreignkey')
            op.drop_column('easfoldersyncstatus', 'folder_name')
            op.create_foreign_key('easfoldersyncstatus_ibfk_1',
                                  'easfoldersyncstatus',
                                  'easaccount', ['account_id'], ['id'])
            op.create_foreign_key('easfoldersyncstatus_ibfk_2',
                                  'easfoldersyncstatus', 'folder',
                                  ['folder_id'], ['id'])
            op.create_unique_constraint('account_id', 'easfoldersyncstatus',
                                        ['account_id', 'folder_id'])

    # some weird alembic bug? need to drop and recreate this FK
    op.drop_constraint('imapfoldersyncstatus_ibfk_1', 'imapfoldersyncstatus',
                       type_='foreignkey')
    op.drop_constraint('account_id', 'imapfoldersyncstatus', type_='unique')
    op.drop_column('imapfoldersyncstatus', 'folder_name')
    op.create_foreign_key('imapfoldersyncstatus_ibfk_1',
                          'imapfoldersyncstatus',
                          'imapaccount', ['account_id'], ['id'])
    op.create_foreign_key('imapfoldersyncstatus_ibfk_2',
                          'imapfoldersyncstatus', 'folder',
                          ['folder_id'], ['id'])
    op.create_unique_constraint('account_id', 'imapfoldersyncstatus',
                                ['account_id', 'folder_id'])

    with session_scope(versioned=False, ignore_soft_deletes=False) \
            as db_session:
        for info in db_session.query(ImapFolderInfo):
            print "migrating", info.folder_name
            info.folder_id = folder_id_for[
                (info.account_id, info.folder_name.lower())]
        db_session.commit()

    # some weird alembic bug? need to drop and recreate this FK
    op.drop_constraint('imapfolderinfo_ibfk_1', 'imapfolderinfo',
                       type_='foreignkey')
    op.drop_constraint('imapaccount_id', 'imapfolderinfo', type_='unique')
    op.drop_column('imapfolderinfo', 'folder_name')
    op.create_foreign_key('imapfolderinfo_ibfk_1', 'imapfolderinfo',
                          'imapaccount', ['account_id'], ['id'])
    op.create_foreign_key('imapfolderinfo_ibfk_2', 'imapfolderinfo', 'folder',
                          ['folder_id'], ['id'])
    op.create_unique_constraint('imapaccount_id', 'imapfolderinfo',
                                ['account_id', 'folder_id'])
def downgrade():
    """Downgrading this migration is unsupported; always raises."""
    raise Exception("no going back!")
|
dbmi-pitt/DIKB-Micropublication | refs/heads/master | scripts/mp-scripts/Bio/stringfns.py | 1 | # Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This provides useful general functions for working with strings.
Functions:
splitany Split a string using many delimiters.
find_anychar Find one of a list of characters in a string.
rfind_anychar Find one of a list of characters in a string, from end to start.
starts_with Check whether a string starts with another string [DEPRECATED].
"""
def splitany(s, sep=" \011\012\013\014\015", maxsplit=None, negate=0):
    """splitany(s [,sep [,maxsplit [,negate]]]) -> list of strings

    Split *s* treating ANY single character of *sep* as a delimiter (unlike
    str.split, which treats sep as one multi-character separator). When
    *negate* is true, every character NOT in sep is a delimiter instead.
    *maxsplit* caps the number of splits performed.
    """
    pieces = []
    start = 0
    for position, char in enumerate(s):
        if maxsplit is not None and len(pieces) >= maxsplit:
            break
        is_delimiter = char in sep
        if negate:
            is_delimiter = not is_delimiter
        if is_delimiter:
            pieces.append(s[start:position])
            start = position + 1
    pieces.append(s[start:])
    return pieces
def find_anychar(string, chars, index=None, negate=0):
    """find_anychar(string, chars[, index]) -> index of a character or -1

    Scan *string* forward from *index* (default: the start) and return the
    position of the first character that is in *chars* — or, when *negate*
    is true, the first character NOT in *chars*. Returns -1 if none found.
    """
    pos = 0 if index is None else index
    # Advance while the current character is not a match.
    while pos < len(string) and (string[pos] in chars) == bool(negate):
        pos += 1
    return -1 if pos == len(string) else pos
def rfind_anychar(string, chars, index=None, negate=0):
    """rfind_anychar(string, chars[, index]) -> index of a character or -1

    Scan *string* backward from *index* (default: the end) and return the
    position of the first character that is in *chars* — or, when *negate*
    is true, the first character NOT in *chars*. Returns -1 if none found.
    """
    pos = len(string) - 1 if index is None else index
    # Retreat while the current character is not a match; ends at -1 naturally.
    while pos >= 0 and (string[pos] in chars) == bool(negate):
        pos -= 1
    return pos
def starts_with(s, start):
    """starts_with(s, start) -> 1/0

    Return whether *s* begins with *start*. DEPRECATED — call
    ``s.startswith(start)`` directly instead.
    """
    import warnings
    warnings.warn("The starts_with function in Bio.stringfns was deprecated. Please use s.startswith(start) instead of starts_with(s, start)", DeprecationWarning)
    result = s.startswith(start)
    return result
# Try and load C implementations of functions. If I can't,
# then just ignore and use the pure python implementations.
try:
from cstringfns import *
except ImportError:
pass
|
k0ste/ansible | refs/heads/devel | lib/ansible/plugins/action/reboot.py | 17 | # Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import random
import time
from datetime import datetime, timedelta
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common.collections import is_string
from ansible.module_utils.common.validation import check_type_str
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
    """Raised when a retried operation exceeds its allotted timeout."""
    pass
class ActionModule(ActionBase):
    """Reboot a host and wait for it to return, validating the reboot via a
    change in boot time and a post-reboot test command."""
    TRANSFERS_FILES = False
    # Task arguments accepted by this action plugin.
    _VALID_ARGS = frozenset((
        'boot_time_command',
        'connect_timeout',
        'msg',
        'post_reboot_delay',
        'pre_reboot_delay',
        'test_command',
        'reboot_timeout',
        'search_paths'
    ))

    # Defaults used when the corresponding task argument is not supplied.
    DEFAULT_REBOOT_TIMEOUT = 600
    DEFAULT_CONNECT_TIMEOUT = None
    DEFAULT_PRE_REBOOT_DELAY = 0
    DEFAULT_POST_REBOOT_DELAY = 0
    DEFAULT_TEST_COMMAND = 'whoami'
    DEFAULT_BOOT_TIME_COMMAND = 'cat /proc/sys/kernel/random/boot_id'
    DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible'
    DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
    DEFAULT_SHUTDOWN_COMMAND_ARGS = '-r {delay_min} "{message}"'
    DEFAULT_SUDOABLE = True

    # Maps removed argument name -> Ansible version (see deprecated_args()).
    DEPRECATED_ARGS = {}

    # Platform-specific overrides below are keyed by distribution
    # name+version, name, or family (see _get_value_from_facts); platforms
    # not listed fall back to the DEFAULT_* values above.
    BOOT_TIME_COMMANDS = {
        'freebsd': '/sbin/sysctl kern.boottime',
        'openbsd': '/sbin/sysctl kern.boottime',
        'macosx': 'who -b',
        'solaris': 'who -b',
        'sunos': 'who -b',
        'vmkernel': 'grep booted /var/log/vmksummary.log | tail -n 1',
        'aix': 'who -b',
    }

    SHUTDOWN_COMMANDS = {
        'alpine': 'reboot',
        'vmkernel': 'reboot',
    }

    # Argument templates; placeholders are filled by get_shutdown_command_args().
    SHUTDOWN_COMMAND_ARGS = {
        'alpine': '',
        'freebsd': '-r +{delay_sec}s "{message}"',
        'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
        'macosx': '-r +{delay_min} "{message}"',
        'openbsd': '-r +{delay_min} "{message}"',
        'solaris': '-y -g {delay_sec} -i 6 "{message}"',
        'sunos': '-y -g {delay_sec} -i 6 "{message}"',
        'vmkernel': '-d {delay_sec}',
        'aix': '-Fr',
    }

    TEST_COMMANDS = {
        'solaris': 'who',
        'vmkernel': 'who',
    }
def __init__(self, *args, **kwargs):
    """Delegate construction entirely to ActionBase; no extra state here."""
    super(ActionModule, self).__init__(*args, **kwargs)
@property
def pre_reboot_delay(self):
    """Non-negative delay (seconds) from the 'pre_reboot_delay' task arg."""
    return self._check_delay('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY)
@property
def post_reboot_delay(self):
    """Non-negative delay (seconds) from the 'post_reboot_delay' task arg."""
    return self._check_delay('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY)
def _check_delay(self, key, default):
    """Return the task arg *key* (or legacy '<key>_sec') as a non-negative int.

    Falls back to *default* when neither argument form was supplied; negative
    values are clamped to zero.
    """
    value = int(self._task.args.get(key, self._task.args.get(key + '_sec', default)))
    # Clamp rather than branch: same behavior, clearer intent.
    return max(value, 0)
def _get_value_from_facts(self, variable_name, distribution, default_value):
    """Get dist+version specific args first, then distribution, then family, lastly use default."""
    mapping = getattr(self, variable_name)
    # Most-specific key wins; the first key present in the mapping is used,
    # even if it maps to a falsy value.
    lookup_order = (
        distribution['name'] + distribution['version'],
        distribution['name'],
        distribution['family'],
    )
    for key in lookup_order:
        if key in mapping:
            return mapping[key]
    return getattr(self, default_value)
def get_shutdown_command_args(self, distribution):
    """Build the argument string for the platform's shutdown command."""
    template = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
    # Convert seconds to minutes. If less that 60, set it to 0.
    delay_min = self.pre_reboot_delay // 60
    message = self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE)
    return template.format(delay_sec=self.pre_reboot_delay, delay_min=delay_min, message=message)
def get_distribution(self, task_vars):
    """Run the setup module (minimal fact subset) on the target host and
    return a dict with lowercase 'name', major 'version', and 'family'.

    Raises AnsibleError when fact gathering fails or expected keys are missing.
    """
    distribution = {}
    display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
    module_output = self._execute_module(
        task_vars=task_vars,
        module_name='setup',
        module_args={'gather_subset': 'min'})
    try:
        if module_output.get('failed', False):
            raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
                to_native(module_output['module_stdout']).strip(),
                to_native(module_output['module_stderr']).strip()))
        distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
        # Only the major version component is kept (e.g. '7' from '7.4').
        distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
        distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
        display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
        return distribution
    except KeyError as ke:
        raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
def get_shutdown_command(self, task_vars, distribution):
    """Locate the platform's shutdown binary on the target via the find module.

    Searches the 'search_paths' task arg (default sbin locations), remembers
    the first match on self._shutdown_command, and returns it. Raises
    AnsibleError for an invalid search_paths value or when nothing is found.
    """
    shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
    default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
    search_paths = self._task.args.get('search_paths', default_search_paths)

    # FIXME: switch all this to user arg spec validation methods when they are available
    # Convert bare strings to a list
    if is_string(search_paths):
        search_paths = [search_paths]

    # Error if we didn't get a list
    err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
    try:
        incorrect_type = any(not is_string(x) for x in search_paths)
        if not isinstance(search_paths, list) or incorrect_type:
            raise TypeError
    except TypeError:
        raise AnsibleError(err_msg.format(search_paths))

    display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
        action=self._task.action,
        command=shutdown_bin,
        paths=search_paths))
    find_result = self._execute_module(
        task_vars=task_vars,
        module_name='find',
        module_args={
            'paths': search_paths,
            'patterns': [shutdown_bin],
            'file_type': 'any'
        }
    )

    full_path = [x['path'] for x in find_result['files']]
    if not full_path:
        raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
    # Remember the resolved path on the instance as well as returning it.
    self._shutdown_command = full_path[0]
    return self._shutdown_command
def deprecated_args(self):
    """Warn about any supplied task arguments listed in DEPRECATED_ARGS."""
    for arg, version in self.DEPRECATED_ARGS.items():
        if self._task.args.get(arg) is None:
            continue
        display.warning("Since Ansible {version}, {arg} is no longer a valid option for {action}".format(
            version=version,
            arg=arg,
            action=self._task.action))
def get_system_boot_time(self, distribution):
    """Run the platform's boot-time command on the target and return its
    stripped stdout.

    The 'boot_time_command' task arg, when provided, overrides the
    per-platform default and must be a plain string. Raises AnsibleError on
    an invalid override or a non-zero exit from the command.
    """
    boot_time_command = self._get_value_from_facts('BOOT_TIME_COMMANDS', distribution, 'DEFAULT_BOOT_TIME_COMMAND')

    if self._task.args.get('boot_time_command'):
        boot_time_command = self._task.args.get('boot_time_command')

        # Strict string check: no implicit conversion of other types.
        try:
            check_type_str(boot_time_command, allow_conversion=False)
        except TypeError as e:
            raise AnsibleError("Invalid value given for 'boot_time_command': %s." % to_native(e))

    display.debug("{action}: getting boot time with command: '{command}'".format(action=self._task.action, command=boot_time_command))
    command_result = self._low_level_execute_command(boot_time_command, sudoable=self.DEFAULT_SUDOABLE)

    if command_result['rc'] != 0:
        stdout = command_result['stdout']
        stderr = command_result['stderr']
        raise AnsibleError("{action}: failed to get host boot time info, rc: {rc}, stdout: {out}, stderr: {err}".format(
            action=self._task.action,
            rc=command_result['rc'],
            out=to_native(stdout),
            err=to_native(stderr)))
    display.debug("{action}: last boot time: {boot}".format(action=self._task.action, boot=command_result['stdout'].strip()))
    return command_result['stdout'].strip()
def check_boot_time(self, distribution, previous_boot_time):
    """Fetch the current boot time and raise ValueError while it is unchanged.

    Used as a retry predicate: raising means "the host has not rebooted yet".
    Fix: removed the pointless 'try: ... except Exception as e: raise e'
    wrapper around get_system_boot_time(), which only mangled the traceback.
    """
    display.vvv("{action}: attempting to get system boot time".format(action=self._task.action))
    connect_timeout = self._task.args.get('connect_timeout', self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))

    # override connection timeout from defaults to custom value
    if connect_timeout:
        try:
            display.debug("{action}: setting connect_timeout to {value}".format(action=self._task.action, value=connect_timeout))
            self._connection.set_option("connection_timeout", connect_timeout)
            self._connection.reset()
        except AttributeError:
            display.warning("Connection plugin does not allow the connection timeout to be overridden")

    # try and get boot time; exceptions propagate to the caller unchanged
    current_boot_time = self.get_system_boot_time(distribution)

    # FreeBSD returns an empty string immediately before reboot so adding a length
    # check to prevent prematurely assuming system has rebooted
    if len(current_boot_time) == 0 or current_boot_time == previous_boot_time:
        raise ValueError("boot time has not changed")
    def run_test_command(self, distribution, **kwargs):
        """Run the post-reboot test command on the host and raise on failure.

        The command comes from the task's ``test_command`` argument, falling
        back to the distribution-specific default.  Raising here makes
        do_until_success_or_timeout() keep retrying until the host is usable.

        :raises RuntimeError: if the test command exits non-zero.
        """
        test_command = self._task.args.get('test_command', self._get_value_from_facts('TEST_COMMANDS', distribution, 'DEFAULT_TEST_COMMAND'))
        display.vvv("{action}: attempting post-reboot test command".format(action=self._task.action))
        display.debug("{action}: attempting post-reboot test command '{command}'".format(action=self._task.action, command=test_command))
        try:
            command_result = self._low_level_execute_command(test_command, sudoable=self.DEFAULT_SUDOABLE)
        except Exception:
            # may need to reset the connection in case another reboot occurred
            # which has invalidated our connection
            try:
                self._connection.reset()
            except AttributeError:
                # Not every connection plugin supports reset().
                pass
            # Re-raise so the retry loop treats this attempt as a failure.
            raise
        if command_result['rc'] != 0:
            msg = 'Test command failed: {err} {out}'.format(
                err=to_native(command_result['stderr']),
                out=to_native(command_result['stdout']))
            raise RuntimeError(msg)
        display.vvv("{action}: system successfully rebooted".format(action=self._task.action))
def do_until_success_or_timeout(self, action, reboot_timeout, action_desc, distribution, action_kwargs=None):
max_end_time = datetime.utcnow() + timedelta(seconds=reboot_timeout)
if action_kwargs is None:
action_kwargs = {}
fail_count = 0
max_fail_sleep = 12
while datetime.utcnow() < max_end_time:
try:
action(distribution=distribution, **action_kwargs)
if action_desc:
display.debug('{action}: {desc} success'.format(action=self._task.action, desc=action_desc))
return
except Exception as e:
if isinstance(e, AnsibleConnectionFailure):
try:
self._connection.reset()
except AnsibleConnectionFailure:
pass
# Use exponential backoff with a max timout, plus a little bit of randomness
random_int = random.randint(0, 1000) / 1000
fail_sleep = 2 ** fail_count + random_int
if fail_sleep > max_fail_sleep:
fail_sleep = max_fail_sleep + random_int
if action_desc:
try:
error = to_text(e).splitlines()[-1]
except IndexError as e:
error = to_text(e)
display.debug("{action}: {desc} fail '{err}', retrying in {sleep:.4} seconds...".format(
action=self._task.action,
desc=action_desc,
err=error,
sleep=fail_sleep))
fail_count += 1
time.sleep(fail_sleep)
raise TimedOutException('Timed out waiting for {desc} (timeout={timeout})'.format(desc=action_desc, timeout=reboot_timeout))
    def perform_reboot(self, task_vars, distribution):
        """Issue the reboot command on the remote host.

        Returns a result dict containing 'failed', 'start' (UTC datetime the
        reboot was issued) and, on failure, 'rebooted' and 'msg'.  A dropped
        connection while the command runs is treated as success, since the
        host may shut down before the command can return.
        """
        result = {}
        reboot_result = {}
        shutdown_command = self.get_shutdown_command(task_vars, distribution)
        shutdown_command_args = self.get_shutdown_command_args(distribution)
        reboot_command = '{0} {1}'.format(shutdown_command, shutdown_command_args)
        try:
            display.vvv("{action}: rebooting server...".format(action=self._task.action))
            display.debug("{action}: rebooting server with command '{command}'".format(action=self._task.action, command=reboot_command))
            reboot_result = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE)
        except AnsibleConnectionFailure as e:
            # If the connection is closed too quickly due to the system being shutdown, carry on
            display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
            # Synthesize a success so the rc check below passes.
            reboot_result['rc'] = 0
        result['start'] = datetime.utcnow()
        if reboot_result['rc'] != 0:
            result['failed'] = True
            result['rebooted'] = False
            result['msg'] = "Reboot command failed. Error was {stdout}, {stderr}".format(
                stdout=to_native(reboot_result['stdout'].strip()),
                stderr=to_native(reboot_result['stderr'].strip()))
            return result
        result['failed'] = False
        return result
    def validate_reboot(self, distribution, original_connection_timeout=None, action_kwargs=None):
        """Wait for the host to come back after a reboot and verify it is usable.

        Polls the boot time until it changes, restores the connection timeout
        that was overridden before the reboot, then runs the post-reboot test
        command.  Returns a result dict with 'rebooted'/'changed' on success
        or 'failed'/'msg' on timeout.
        """
        display.vvv('{action}: validating reboot'.format(action=self._task.action))
        result = {}
        try:
            # keep on checking system boot_time with short connection responses
            reboot_timeout = int(self._task.args.get('reboot_timeout', self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT)))
            self.do_until_success_or_timeout(
                action=self.check_boot_time,
                action_desc="last boot time check",
                reboot_timeout=reboot_timeout,
                distribution=distribution,
                action_kwargs=action_kwargs)
            # Get the connect_timeout set on the connection to compare to the original
            try:
                connect_timeout = self._connection.get_option('connection_timeout')
            except KeyError:
                # Connection plugin does not define the option; nothing to restore.
                pass
            else:
                if original_connection_timeout != connect_timeout:
                    try:
                        display.debug("{action}: setting connect_timeout back to original value of {value}".format(
                            action=self._task.action,
                            value=original_connection_timeout))
                        self._connection.set_option("connection_timeout", original_connection_timeout)
                        self._connection.reset()
                    except (AnsibleError, AttributeError) as e:
                        # reset the connection to clear the custom connection timeout
                        display.debug("{action}: failed to reset connection_timeout back to default: {error}".format(action=self._task.action,
                                                                                                                     error=to_text(e)))
            # finally run test command to ensure everything is working
            # FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
            self.do_until_success_or_timeout(
                action=self.run_test_command,
                action_desc="post-reboot test command",
                reboot_timeout=reboot_timeout,
                distribution=distribution,
                action_kwargs=action_kwargs)
            result['rebooted'] = True
            result['changed'] = True
        except TimedOutException as toex:
            # Timed out waiting for the host: report failure but note that the
            # reboot itself was issued.
            result['failed'] = True
            result['rebooted'] = True
            result['msg'] = to_text(toex)
            return result
        return result
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
# If running with local connection, fail so we don't reboot ourself
if self._connection.transport == 'local':
msg = 'Running {0} with local connection would reboot the control node.'.format(self._task.action)
return {'changed': False, 'elapsed': 0, 'rebooted': False, 'failed': True, 'msg': msg}
if self._play_context.check_mode:
return {'changed': True, 'elapsed': 0, 'rebooted': True}
if task_vars is None:
task_vars = {}
self.deprecated_args()
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped', False) or result.get('failed', False):
return result
distribution = self.get_distribution(task_vars)
# Get current boot time
try:
previous_boot_time = self.get_system_boot_time(distribution)
except Exception as e:
result['failed'] = True
result['reboot'] = False
result['msg'] = to_text(e)
return result
# Get the original connection_timeout option var so it can be reset after
original_connection_timeout = None
try:
original_connection_timeout = self._connection.get_option('connection_timeout')
display.debug("{action}: saving original connect_timeout of {timeout}".format(action=self._task.action, timeout=original_connection_timeout))
except KeyError:
display.debug("{action}: connect_timeout connection option has not been set".format(action=self._task.action))
# Initiate reboot
reboot_result = self.perform_reboot(task_vars, distribution)
if reboot_result['failed']:
result = reboot_result
elapsed = datetime.utcnow() - reboot_result['start']
result['elapsed'] = elapsed.seconds
return result
if self.post_reboot_delay != 0:
display.debug("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
display.vvv("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
time.sleep(self.post_reboot_delay)
# Make sure reboot was successful
result = self.validate_reboot(distribution, original_connection_timeout, action_kwargs={'previous_boot_time': previous_boot_time})
elapsed = datetime.utcnow() - reboot_result['start']
result['elapsed'] = elapsed.seconds
return result
|
lecaoquochung/ddnb.django | refs/heads/master | tests/version/tests.py | 352 | from unittest import TestCase
from django import get_version
from django.utils import six
class VersionTests(TestCase):
    """Sanity checks for django.get_version()."""

    def test_development(self):
        dev_tuple = (1, 4, 0, 'alpha', 0)
        # This will return a different result when it's run within or outside
        # of a git clone: 1.4.devYYYYMMDDHHMMSS or 1.4 -- so accept both.
        six.assertRegex(self, get_version(dev_tuple), r'1\.4(\.dev[0-9]+)?')

    def test_releases(self):
        # Map each version tuple to the string it must render as.
        cases = (
            ((1, 4, 0, 'alpha', 1), '1.4a1'),
            ((1, 4, 0, 'beta', 1), '1.4b1'),
            ((1, 4, 0, 'rc', 1), '1.4c1'),
            ((1, 4, 0, 'final', 0), '1.4'),
            ((1, 4, 1, 'rc', 2), '1.4.1c2'),
            ((1, 4, 1, 'final', 0), '1.4.1'),
        )
        for version_tuple, expected in cases:
            self.assertEqual(get_version(version_tuple), expected)
|
jgowans/directionFinder_backend | refs/heads/master | tests/test_antenna.py | 1 | #!/usr/bin/env python
import unittest
import numpy as np
import sys
from directionFinder_backend import antenna
class AntennaTester(unittest.TestCase):
    """Unit tests for antenna.Antenna geometry and phase helpers."""

    def setUp(self):
        # Antenna at fixed (x, y) coordinates used by every test.
        self.antenna = antenna.Antenna(12.34, 23.45)

    def test_coordinates(self):
        self.assertAlmostEqual(self.antenna.x, 12.34)
        self.assertAlmostEqual(self.antenna.y, 23.45)

    def test_rotated_pi(self):
        # A half turn negates both coordinates.
        flipped = self.antenna.rotated(np.pi)
        self.assertAlmostEqual(flipped.x, -12.34)
        self.assertAlmostEqual(flipped.y, -23.45)

    def test_rotated_3_2_pi(self):
        # A three-quarter turn maps (x, y) -> (y, -x).
        turned = self.antenna.rotated(3 * np.pi / 2)
        self.assertAlmostEqual(turned.x, 23.45)
        self.assertAlmostEqual(turned.y, -12.34)

    def test_phase_no_rotate(self):
        # http://www.wolframalpha.com/input/?i=angle%28e^%28i*12.34%29%29
        self.assertAlmostEqual(self.antenna.phase_at_angle(0), -0.22637061)

    def test_phase_rotated(self):
        """ Rotated by 2 radians. Rotation means counter clockwise, yes?
        http://www.wolframalpha.com/input/?i=arg%28e^%28i*Re%28%2812.34%2B23.45i%29*e^%282i%29%29%29%29
        """
        self.assertAlmostEqual(self.antenna.phase_at_angle(2), -1.32553539)
|
canardleteer/objbomber | refs/heads/master | objbomber/objbomber.py | 1 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script doesn't actually do much that's useful.
It was designed to be an orchestration test between a few libraries,
now quite a few libraries.
"""
import os
import time
import sys
import getopt
import logging
import pickle
import random
import datetime
import uuid
logging.basicConfig(level=logging.DEBUG)
from pydisque.client import Client
from pybloomd import BloomdClient
from pyhlld import HlldClient
import redis
from itsdangerous import Serializer
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from Crypto import Random
import leveldb
def usage():
    """Print the command-line usage message to stdout."""
    help_text = """
    objbomber.py [args]
    acceptable arguments:
    --mode=["generate", "listen", "check"]
        generate - tell the script to generate some random messages
        listen - tell the script to listen to disque for messages
        check - tell the script to validate messages against bloomd
        initialize - tell the script that this is the inital run
        (sets up cryptographic keys)
    --dservers=[comma separated host:port list of disque servers]
        defaults to: "localhost:7712,localhost:7711"
        (can be a single host:port pair)
    --dqueue=[name of queue to use in Disque | default "objbomber"]
        (note that "{dqueue}.check" will also be used)
    --secret=[the shared secret | default "coolsecretbro"]
    --bserver=[single host:port:udpport of bloomd the server]
        defaults to: localhost:8673
    --hserver=[single host:port]
        defaults to localhost:4553
    --rserver=[single host:port]
        defaults to localhost:6379
    --bfiltername=[single name of the bloom filter | default "objbomber"]
    --sleep=[seconds to wait in our core loop | default 0]
    --help (prints this message)
    """
    print(help_text)
def main():
    """Primary CLI application logic.

    Parses command-line flags, connects to whichever services the selected
    mode needs (Disque always; bloomd/hlld/redis/leveldb depending on mode),
    then loops forever generating, consuming, checking or subscribing to
    signed messages.

    NOTE(review): this module targets Python 2 only (octal literals such as
    0700 below are a syntax error on Python 3).
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h:v", ["help",
                                                         "dservers=",
                                                         "dqueue=",
                                                         "secret=",
                                                         "bserver=",
                                                         "hserver=",
                                                         "rserver=",
                                                         "rchannel=",
                                                         "bfiltername=",
                                                         "hllname=",
                                                         "mode=",
                                                         "sleep="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit()
    modes = ("generate", "listen", "check", "adaptive", "initialize", "subscriber")
    # set defaults
    mode = None
    dservers = "localhost:7712,localhost:7711"
    dqueue = "objbomber"
    secret = "coolsecretbro"
    bserver = "localhost:8673"
    hserver = "localhost:4553"
    rserver = "localhost:6379"
    rchannel = "objbomber"
    bfiltername = "objbomber"
    hllname = "objbomber"
    sleep = None
    userhomedir = os.path.expanduser("~")
    # flippin' switches...
    # NOTE(review): every ``o in ("--flag")`` below is a substring test on a
    # string, not tuple membership (the comma is missing).  It happens to work
    # for these exact option names, but it is fragile -- confirm and fix.
    # NOTE(review): getopt declares "dqueue=" but the handler checks
    # "--queue", so --dqueue falls through to the assert below; also the
    # handler stores a list ([a]) which would break checkdqueue below.
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("--dservers"):
            dservers = a
        elif o in ("--queue"):
            dqueue = [a]
        elif o in ("--secret"):
            secret = a
        elif o in ("--bserver"):
            bserver = a
        elif o in ("--hserver"):
            hserver = a
        elif o in ("--rserver"):
            rserver = a
        elif o in ("--rchannel"):
            rchannel = a
        elif o in ("--bfiltername"):
            bfiltername = a
        elif o in ("--hllname"):
            hllname = a
        elif o in ("--mode"):
            if a in modes:
                mode = a
            else:
                usage()
                sys.exit()
        elif o in ("--listen"):
            # NOTE(review): --listen/--check are not declared to getopt, so
            # these two branches are unreachable.
            mode_listen = True
        elif o in ("--check"):
            mode_check = True
        elif o in ("--sleep"):
            sleep = int(a)
        else:
            assert False, "unhandled option"
    checkdqueue = dqueue + ".check"
    if sleep in (None, 0):
        sleep = 0.0001
    # mode must be set
    if not mode:
        usage()
        sys.exit()
    # Handler for the cryptographic signatures
    # TODO: secret should be "secret" + a version number
    s = Serializer(secret)
    # config basics
    datadir = userhomedir + "/.objbomber"
    # prepare servers and queue lists
    dservers = dservers.split(",")
    bserver = [bserver]
    hserver = hserver
    # all modes use Disque
    logging.info("Connecting to Disque...")
    disque_client = Client(dservers)
    disque_client.connect()
    if mode in ("check", "listen"):
        logging.info("Creating Bloomd Client...")
        bloomd_client = BloomdClient(bserver)
        bfilter = bloomd_client.create_filter(bfiltername)
        # add pyhlld
        logging.info("Creating HLLD Client... - not yet used")
        hlld_client = HlldClient(hserver)
        hll = hlld_client.create_set(hllname)
    if mode in ("check", "listen", "generate", "subscriber"):
        # add redis hll & pubsub
        logging.info("Creating Redis Client...")
        rhost, rport = rserver.split(":")
        redd = redis.StrictRedis(host=rhost, port=rport, db=0)
        redpubsub = redd.pubsub()
    if mode in ("subscriber"):
        redpubsub.subscribe(rchannel)
    if mode in ("generate"):
        # TODO: check on how well LevelDB handles
        # multiple clients
        db = leveldb.LevelDB(datadir + '/notary')
    # special mode to handle our first run
    # TODO: push into a function
    # TODO: handle filesystem errors
    # TODO: reconsider using Cement for all of this
    # TODO: generate an instance GUID
    if mode == "initialize":
        UUID = uuid.uuid4()
        logging.info("Our system UUID is now: %s" % UUID)
        # TODO: save and load this uuid
        # check to see if there is a ~/.objbomber directory, quit if there is
        # TODO: this does not handle errors in initalization
        logging.info("Checking for .objbomber in %s..." % userhomedir)
        if os.path.exists(datadir):
            logging.info("Already been initialized!")
            # TODO: print some information about how to handle this
            sys.exit()
        # TODO: make one
        os.mkdir(datadir, 0700)
        # generate our RSA signing key
        # TODO: make # of bits in key a flag
        logging.info("Begining to create our notary key.")
        logging.info("Reading from RNG.")
        rng = Random.new().read
        logging.info("Generating RSA key...")
        privRSAkey = RSA.generate(4096, rng)
        privPEM = privRSAkey.exportKey()
        pubRSAkey = privRSAkey.publickey()
        pubPEM = pubRSAkey.exportKey()
        logging.info("Key generated.")
        # save privkey to disk
        with open(datadir + "/privkey.pem", "w") as keyfile:
            keyfile.write(privPEM)
            keyfile.close()
        os.chmod(datadir + "/privkey.pem", 0700)
        logging.info("Unencrypted RSA key written to disk.")
        # save the pubkey
        with open(datadir + "/pubkey.pem", "w") as keyfile:
            keyfile.write(pubPEM)
            keyfile.close()
        logging.info("Public RSA key written to disk.")
        logging.info("Creating crypto notary storage.")
        leveldb.LevelDB(datadir + '/notary')
        # we absolutely must quit here, or we will get stuck in
        # an infinate loop
        sys.exit()
    # load our secret key (TODO: this is probably better as try/exc)
    # and build our contexts
    with open(datadir + "/privkey.pem", "r") as keyfile:
        privRSAkey = RSA.importKey(keyfile.read())
    while True:
        # TODO: Adaptive Mode - this mode should peek the queues, and
        # make a decision about where this thread can make the most
        # impact on its own.
        if mode == "adaptive":
            # TODO: Do some queue peeking.
            # TODO: Make some decisions about which mode to adapt to.
            pass
        # TODO: All modes should be placed into functions.
        # Listen Mode - Listens to the queue, pulls out jobs,
        # validates the signature, puts them in bloomd
        if mode == "listen":
            logging.info("Getting Jobs from Disque.")
            jobs = disque_client.get_job([dqueue])
            print("Got %d jobs." % len(jobs))
            for queue_name, job_id, job in jobs:
                logging.debug("Handling a job: %s" % job)
                # NOTE(review): the bare excepts below NACK the job but then
                # fall through and ACK it anyway -- confirm intent.
                try:
                    job = s.loads(job)
                    logging.debug("Job Authenticated: %s" % job)
                except:
                    logging.warning("Job did not pass authentication.")
                    disque_client.nack_job(job_id)
                # add to bloom filter
                try:
                    bfilter.add(job)
                except:
                    logging.warning("Job was not added to bloomd.")
                    disque_client.nack_job(job_id)
                try:
                    hllResponse = hll.add(job)
                except:
                    logging.warning("Job was not added to hlld.")
                    disque_client.nack_job(job_id)
                # TODO: add redis HLL support
                # tell disque that this job has been processed
                disque_client.ack_job(job_id)
                # sign the check job
                job = s.dumps(job)
                # throw this message on the check queue
                disque_client.add_job(checkdqueue, job)
        elif mode == "check":
            # TODO
            # Check the secondary disque queue for checks
            # Ask the bloom filter if they have seen this
            logging.info("Getting Jobs from Disque.")
            jobs = disque_client.get_job([checkdqueue])
            for queue_name, job_id, job in jobs:
                logging.debug("Checking: %s" % job)
                try:
                    job = s.loads(job)
                except:
                    disque_client.nack_job(job_id)
                # we don't NACK on failed cache hits
                if job in bfilter:
                    logging.info("Confirming: %s" % job)
                else:
                    logging.info("Not found in bloom filter: %s" % job)
                disque_client.ack_job(job_id)
        elif mode == "generate":
            # TODO - where will these messages come from?
            # for now they will just be random numbers, but
            # really we should make them objects to really be
            # testing serialization
            msg = [random.randint(1000, 1000000),
                   random.randint(1000, 1000000)]
            # itsdangerous serialization & signing
            msg = s.dumps(msg)
            # Now that this message is serialized, we can sign it again with a
            # public key.
            # TODO: incorporate saving the signature into the notary records
            msghash = SHA.new(msg)
            signer = PKCS1_v1_5.new(privRSAkey)
            signature = signer.sign(msghash)
            assert signer.verify(msghash, signature)
            record = {'message': msg, 'signature': signature}
            record = pickle.dumps(record)
            # send the job over to Disque
            # TODO: add more command line flags for queuing
            job_id = disque_client.add_job(dqueue, msg)
            logging.debug("Added a job to Disque: %s" % msg)
            # publish just the signature on redis pubsub
            redd.publish(rchannel, signature)
            # TODO: save the publication in the notary
            # TODO: do more then just save the signatures
            # TODO: add a GUID to the key
            key = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")
            db.Put(key, record)
            # testing the results of leveldb's store, this is a test, and
            # an expensive test
            sig2 = db.Get(key)
            sig2 = pickle.loads(sig2)['signature']
            assert signer.verify(msghash, sig2)
        elif mode == "subscriber":
            msg = redpubsub.get_message()
            # TODO: do something useful, like log
            if msg:
                print("got a message")
        time.sleep(sleep)
# Script entry point: delegate to main() when executed directly.
if __name__ == "__main__":
    main()
|
troygrosfield/Django-facebook | refs/heads/master | docs/docs_env/Lib/encodings/iso2022_jp_3.py | 816 | #
# iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_3')
class Codec(codecs.Codec):
    # Stateless encode/decode entry points, delegated to the C codec object.
    encode = codec.encode
    decode = codec.decode
# Incremental encoder: multibyte state machine from _multibytecodec, bound to
# the iso2022_jp_3 codec object.
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
# Incremental decoder counterpart, sharing the same codec object.
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
# Stream reader: combines the stateless Codec with the multibyte stream machinery.
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
# Stream writer counterpart of StreamReader.
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Return the CodecInfo record used to register this codec."""
    # Codec holds no per-instance state, so one instance serves both
    # directions.
    codec_obj = Codec()
    return codecs.CodecInfo(
        name='iso2022_jp_3',
        encode=codec_obj.encode,
        decode=codec_obj.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
wagnerand/olympia | refs/heads/master | src/olympia/core/__init__.py | 10 | import threading
default_app_config = 'olympia.core.apps.CoreConfig'
# Thread-local storage for the current request's user and remote address,
# written via set_user()/set_remote_addr() and read via the getters below.
_locals = threading.local()
_locals.user = None
_locals.remote_addr = None
def get_user():
    """Return the user bound to the current thread, or None if unset."""
    try:
        return _locals.user
    except AttributeError:
        # Threads that never called set_user() have no attribute yet.
        return None
def set_user(user):
    """Bind *user* to the current thread's local storage."""
    setattr(_locals, 'user', user)
def get_remote_addr():
    """Return the remote address bound to the current thread, or None."""
    try:
        return _locals.remote_addr
    except AttributeError:
        # No set_remote_addr() call has happened on this thread.
        return None
def set_remote_addr(remote_addr):
    """Bind *remote_addr* to the current thread's local storage."""
    setattr(_locals, 'remote_addr', remote_addr)
|
shamoons/linguist-samples | refs/heads/master | samples/Python/django-models-base.py | 92 | from __future__ import unicode_literals
import copy
import sys
from functools import update_wrapper
from future_builtins import zip
import django.db.models.manager # Imported to register signal handler.
from django.conf import settings
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.core import validators
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db import (router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry
from django.utils.encoding import smart_str, force_unicode
from django.utils.text import get_text_list, capfirst
class ModelBase(type):
    """
    Metaclass for all models.

    Builds the model class: attaches _meta (Options), wires up inheritance
    (abstract bases, multi-table parents, proxies), registers the class with
    the app cache, and installs per-model conveniences (exceptions, ordering
    accessors).  Note: Python 2 era code (unicode/b'' literals, __metaclass__).
    """
    def __new__(cls, name, bases, attrs):
        super_new = super(ModelBase, cls).__new__
        parents = [b for b in bases if isinstance(b, ModelBase)]
        if not parents:
            # If this isn't a subclass of Model, don't do anything special.
            return super_new(cls, name, bases, attrs)
        # Create the class.
        module = attrs.pop('__module__')
        new_class = super_new(cls, name, bases, {'__module__': module})
        attr_meta = attrs.pop('Meta', None)
        abstract = getattr(attr_meta, 'abstract', False)
        if not attr_meta:
            # No inner Meta on this class: fall back to an inherited one.
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta
        base_meta = getattr(new_class, '_meta', None)
        if getattr(meta, 'app_label', None) is None:
            # Figure out the app_label by looking one level up.
            # For 'django.contrib.sites.models', this would be 'sites'.
            model_module = sys.modules[new_class.__module__]
            kwargs = {"app_label": model_module.__name__.split('.')[-2]}
        else:
            kwargs = {}
        new_class.add_to_class('_meta', Options(meta, **kwargs))
        if not abstract:
            # Give each concrete model its own DoesNotExist/
            # MultipleObjectsReturned subclasses, inheriting from the
            # corresponding exceptions of its concrete parents.
            new_class.add_to_class('DoesNotExist', subclass_exception(b'DoesNotExist',
                    tuple(x.DoesNotExist
                          for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
                    or (ObjectDoesNotExist,), module))
            new_class.add_to_class('MultipleObjectsReturned', subclass_exception(b'MultipleObjectsReturned',
                    tuple(x.MultipleObjectsReturned
                          for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
                    or (MultipleObjectsReturned,), module))
            if base_meta and not base_meta.abstract:
                # Non-abstract child classes inherit some attributes from their
                # non-abstract parent (unless an ABC comes before it in the
                # method resolution order).
                if not hasattr(meta, 'ordering'):
                    new_class._meta.ordering = base_meta.ordering
                if not hasattr(meta, 'get_latest_by'):
                    new_class._meta.get_latest_by = base_meta.get_latest_by
        is_proxy = new_class._meta.proxy
        if getattr(new_class, '_default_manager', None):
            if not is_proxy:
                # Multi-table inheritance doesn't inherit default manager from
                # parents.
                new_class._default_manager = None
                new_class._base_manager = None
            else:
                # Proxy classes do inherit parent's default manager, if none is
                # set explicitly.
                new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
                new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
        # Bail out early if we have already created this class.
        m = get_model(new_class._meta.app_label, name,
                      seed_cache=False, only_installed=False)
        if m is not None:
            return m
        # Add all attributes to the class.
        for obj_name, obj in attrs.items():
            new_class.add_to_class(obj_name, obj)
        # All the fields of any type declared on this model
        new_fields = new_class._meta.local_fields + \
            new_class._meta.local_many_to_many + \
            new_class._meta.virtual_fields
        field_names = set([f.name for f in new_fields])
        # Basic setup for proxy models.
        if is_proxy:
            base = None
            for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
                if parent._meta.abstract:
                    if parent._meta.fields:
                        raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
                    else:
                        continue
                if base is not None:
                    raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
                else:
                    base = parent
            if base is None:
                raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
            if (new_class._meta.local_fields or
                    new_class._meta.local_many_to_many):
                raise FieldError("Proxy model '%s' contains model fields." % name)
            new_class._meta.setup_proxy(base)
            new_class._meta.concrete_model = base._meta.concrete_model
        else:
            new_class._meta.concrete_model = new_class
        # Do the appropriate setup for any model parents.
        # o2o_map: target model -> the explicit OneToOneField pointing at it,
        # used to recognize user-declared parent links.
        o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
                        if isinstance(f, OneToOneField)])
        for base in parents:
            original_base = base
            if not hasattr(base, '_meta'):
                # Things without _meta aren't functional models, so they're
                # uninteresting parents.
                continue
            parent_fields = base._meta.local_fields + base._meta.local_many_to_many
            # Check for clashes between locally declared fields and those
            # on the base classes (we cannot handle shadowed fields at the
            # moment).
            for field in parent_fields:
                if field.name in field_names:
                    raise FieldError('Local field %r in class %r clashes '
                                     'with field of similar name from '
                                     'base class %r' %
                                     (field.name, name, base.__name__))
            if not base._meta.abstract:
                # Concrete classes...
                base = base._meta.concrete_model
                if base in o2o_map:
                    field = o2o_map[base]
                elif not is_proxy:
                    # Auto-create the parent link for multi-table inheritance.
                    attr_name = '%s_ptr' % base._meta.module_name
                    field = OneToOneField(base, name=attr_name,
                                          auto_created=True, parent_link=True)
                    new_class.add_to_class(attr_name, field)
                else:
                    field = None
                new_class._meta.parents[base] = field
            else:
                # .. and abstract ones.
                for field in parent_fields:
                    new_class.add_to_class(field.name, copy.deepcopy(field))
                # Pass any non-abstract parent classes onto child.
                new_class._meta.parents.update(base._meta.parents)
            # Inherit managers from the abstract base classes.
            new_class.copy_managers(base._meta.abstract_managers)
            # Proxy models inherit the non-abstract managers from their base,
            # unless they have redefined any of them.
            if is_proxy:
                new_class.copy_managers(original_base._meta.concrete_managers)
            # Inherit virtual fields (like GenericForeignKey) from the parent
            # class
            for field in base._meta.virtual_fields:
                if base._meta.abstract and field.name in field_names:
                    raise FieldError('Local field %r in class %r clashes '\
                                     'with field of similar name from '\
                                     'abstract base class %r' % \
                                     (field.name, name, base.__name__))
                new_class.add_to_class(field.name, copy.deepcopy(field))
        if abstract:
            # Abstract base models can't be instantiated and don't appear in
            # the list of models for an app. We do the final setup for them a
            # little differently from normal models.
            attr_meta.abstract = False
            new_class.Meta = attr_meta
            return new_class
        new_class._prepare()
        register_models(new_class._meta.app_label, new_class)
        # Because of the way imports happen (recursively), we may or may not be
        # the first time this model tries to register with the framework. There
        # should only be one class for each model, so we always return the
        # registered version.
        return get_model(new_class._meta.app_label, name,
                         seed_cache=False, only_installed=False)
    def copy_managers(cls, base_managers):
        """Copy managers from a base class onto cls, preserving order and
        skipping names the class already overrides."""
        # This is in-place sorting of an Options attribute, but that's fine.
        base_managers.sort()
        for _, mgr_name, manager in base_managers:
            val = getattr(cls, mgr_name, None)
            if not val or val is manager:
                new_manager = manager._copy_to_model(cls)
                cls.add_to_class(mgr_name, new_manager)
    def add_to_class(cls, name, value):
        """Attach *value* to the class, letting objects that know how to
        contribute (fields, managers, Options) hook themselves in."""
        if hasattr(value, 'contribute_to_class'):
            value.contribute_to_class(cls, name)
        else:
            setattr(cls, name, value)
    def _prepare(cls):
        """
        Creates some methods once self._meta has been populated.
        """
        opts = cls._meta
        opts._prepare(cls)
        if opts.order_with_respect_to:
            cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
            cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
            # defer creating accessors on the foreign class until we are
            # certain it has been created
            def make_foreign_order_accessors(field, model, cls):
                setattr(
                    field.rel.to,
                    'get_%s_order' % cls.__name__.lower(),
                    curry(method_get_order, cls)
                )
                setattr(
                    field.rel.to,
                    'set_%s_order' % cls.__name__.lower(),
                    curry(method_set_order, cls)
                )
            add_lazy_relation(
                cls,
                opts.order_with_respect_to,
                opts.order_with_respect_to.rel.to,
                make_foreign_order_accessors
            )
        # Give the class a docstring -- its definition.
        if cls.__doc__ is None:
            cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
        if hasattr(cls, 'get_absolute_url'):
            # Wrap get_absolute_url so ABSOLUTE_URL_OVERRIDES can replace it.
            cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
                                                  cls.get_absolute_url)
        signals.class_prepared.send(sender=cls)
class ModelState(object):
    """
    A class for storing instance state
    """
    def __init__(self, db=None):
        # If true, uniqueness validation checks will consider this a new,
        # as-yet-unsaved object.  Necessary for correct validation of new
        # instances of objects with explicit (non-auto) PKs.  This impacts
        # validation only; it has no effect on the actual save.
        self.adding = True
        # Alias of the database this instance was loaded from (None until
        # the instance is tied to a database).
        self.db = db
class Model(object):
__metaclass__ = ModelBase
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
fields_iter = iter(self._meta.fields)
if not kwargs:
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.rel, ManyToOneRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in kwargs.keys():
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % kwargs.keys()[0])
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
def __repr__(self):
    """Return '<ClassName: unicode(self)>' rendered as a bytestring."""
    # Fall back to a placeholder when the instance's own unicode
    # conversion raises a codec error.
    try:
        text = unicode(self)
    except (UnicodeEncodeError, UnicodeDecodeError):
        text = '[Bad Unicode data]'
    return smart_str('<%s: %s>' % (self.__class__.__name__, text))
def __str__(self):
    """Return a UTF-8 bytestring, delegating to __unicode__ when defined."""
    if not hasattr(self, '__unicode__'):
        return '%s object' % self.__class__.__name__
    return force_unicode(self).encode('utf-8')
def __eq__(self, other):
    """Instances are equal iff they share a class and a primary-key value."""
    if not isinstance(other, self.__class__):
        return False
    return self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
    """Logical negation of __eq__ (required explicitly on Python 2)."""
    equal = self.__eq__(other)
    return not equal
def __hash__(self):
    """Hash on the primary-key value, keeping consistency with __eq__."""
    pk_value = self._get_pk_val()
    return hash(pk_value)
def __reduce__(self):
    """
    Provides pickling support. Normally, this just dispatches to Python's
    standard handling. However, for models with deferred field loading, we
    need to do things manually, as they're dynamically created classes and
    only module-level classes can be pickled by the default path.

    Returns the standard (callable, args, state) reduce tuple; the
    callable is the module-level model_unpickle(), so the pickle stream
    never references a dynamically created deferred class directly.
    """
    data = self.__dict__
    model = self.__class__
    # The obvious thing to do here is to invoke super().__reduce__()
    # for the non-deferred case. Don't do that.
    # On Python 2.4, there is something weird with __reduce__,
    # and as a result, the super call will cause an infinite recursion.
    # See #10547 and #12121.
    defers = []
    if self._deferred:
        from django.db.models.query_utils import deferred_class_factory
        factory = deferred_class_factory
        # Record which field attributes are still deferred so an
        # equivalent deferred class can be rebuilt at unpickle time.
        for field in self._meta.fields:
            if isinstance(self.__class__.__dict__.get(field.attname),
                          DeferredAttribute):
                defers.append(field.attname)
        # Pickle the concrete model the deferred class proxies for,
        # not the dynamic class itself.
        model = self._meta.proxy_for_model
    else:
        factory = simple_class_factory
    return (model_unpickle, (model, defers, factory), data)
def _get_pk_val(self, meta=None):
    """Return the primary-key value, using *meta* (default: self._meta)
    to locate the pk field."""
    meta = meta or self._meta
    return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
    """Assign *value* to this instance's primary-key attribute."""
    pk_attname = self._meta.pk.attname
    return setattr(self, pk_attname, value)
pk = property(_get_pk_val, _set_pk_val)
def serializable_value(self, field_name):
    """
    Returns the value of the field name for this instance. If the field is
    a foreign key, returns the id value, instead of the object. If there's
    no Field object with this name on the model, the model attribute's
    value is returned directly.

    Used to serialize a field's value (in the serializer, or form output,
    for example). Normally, you would just access the attribute directly
    and not use this method.
    """
    try:
        field = self._meta.get_field_by_name(field_name)[0]
    except FieldDoesNotExist:
        # Not a real model field -- fall back to the plain attribute.
        return getattr(self, field_name)
    # field.attname is the column attribute (e.g. "user_id" for FKs).
    return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
         update_fields=None):
    """
    Saves the current instance. Override this in a subclass if you want to
    control the saving process.

    The 'force_insert' and 'force_update' parameters can be used to insist
    that the "save" must be an SQL insert or update (or equivalent for
    non-SQL backends), respectively. Normally, they should not be set.

    :param using: database alias to write to; routed automatically when
        None (inside save_base).
    :param update_fields: optional iterable of field names restricting the
        UPDATE to those columns; an empty iterable makes save() a no-op.
    :raises ValueError: for contradictory force flags, or names in
        update_fields that are not concrete local fields.
    """
    if force_insert and (force_update or update_fields):
        raise ValueError("Cannot force both insert and updating in model saving.")
    if update_fields is not None:
        # If update_fields is empty, skip the save. We do also check for
        # no-op saves later on for inheritance cases. This bailout is
        # still needed for skipping signal sending.
        if len(update_fields) == 0:
            return
        update_fields = frozenset(update_fields)
        # Only non-pk concrete fields can be targeted by a column UPDATE;
        # anything else is unknown or a many-to-many field.
        field_names = set([field.name for field in self._meta.fields
                           if not field.primary_key])
        non_model_fields = update_fields.difference(field_names)
        if non_model_fields:
            raise ValueError("The following fields do not exist in this "
                             "model or are m2m fields: %s"
                             % ', '.join(non_model_fields))
    self.save_base(using=using, force_insert=force_insert,
                   force_update=force_update, update_fields=update_fields)
save.alters_data = True
def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
              force_update=False, using=None, update_fields=None):
    """
    Does the heavy-lifting involved in saving. Subclasses shouldn't need to
    override this method. It's separate from save() in order to hide the
    need for overrides of save() to pass around internal-only parameters
    ('raw', 'cls', and 'origin').

    Recurses through meta.parents to save inherited tables first, then
    performs an UPDATE-then-INSERT for this model's own table, sending
    pre_save/post_save signals around the whole process.
    """
    using = using or router.db_for_write(self.__class__, instance=self)
    assert not (force_insert and (force_update or update_fields))
    assert update_fields is None or len(update_fields) > 0
    if cls is None:
        # Top-level call (not a recursive parent save).
        cls = self.__class__
        meta = cls._meta
        if not meta.proxy:
            origin = cls
    else:
        meta = cls._meta
    # Only the originating class sends signals; auto-created models
    # stay silent.
    if origin and not meta.auto_created:
        signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                              update_fields=update_fields)
    # If we are in a raw save, save the object exactly as presented.
    # That means that we don't try to be smart about saving attributes
    # that might have come from the parent class - we just save the
    # attributes we have been given to the class we have been given.
    # We also go through this process to defer the save of proxy objects
    # to their actual underlying model.
    if not raw or meta.proxy:
        if meta.proxy:
            org = cls
        else:
            org = None
        for parent, field in meta.parents.items():
            # At this point, parent's primary key field may be unknown
            # (for example, from administration form which doesn't fill
            # this field). If so, fill it.
            if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
                setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
            self.save_base(cls=parent, origin=org, using=using,
                           update_fields=update_fields)
            if field:
                # Propagate the parent's (possibly freshly assigned) pk
                # into this row's parent-link column.
                setattr(self, field.attname, self._get_pk_val(parent._meta))
        if meta.proxy:
            # Proxy models have no table of their own; saving the
            # proxied model above is all the work there is.
            return
    if not meta.proxy:
        non_pks = [f for f in meta.local_fields if not f.primary_key]
        if update_fields:
            non_pks = [f for f in non_pks if f.name in update_fields]
        # First, try an UPDATE. If that doesn't update anything, do an INSERT.
        pk_val = self._get_pk_val(meta)
        pk_set = pk_val is not None
        record_exists = True
        manager = cls._base_manager
        if pk_set:
            # Determine if we should do an update (pk already exists, forced update,
            # no force_insert)
            if ((force_update or update_fields) or (not force_insert and
                    manager.using(using).filter(pk=pk_val).exists())):
                if force_update or non_pks:
                    values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
                    if values:
                        rows = manager.using(using).filter(pk=pk_val)._update(values)
                        if force_update and not rows:
                            raise DatabaseError("Forced update did not affect any rows.")
                        if update_fields and not rows:
                            raise DatabaseError("Save with update_fields did not affect any rows.")
            else:
                record_exists = False
        if not pk_set or not record_exists:
            if meta.order_with_respect_to:
                # If this is a model with an order_with_respect_to
                # autopopulate the _order field
                field = meta.order_with_respect_to
                order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
                self._order = order_value
            fields = meta.local_fields
            if not pk_set:
                if force_update or update_fields:
                    raise ValueError("Cannot force an update in save() with no primary key.")
                # Let the database assign the auto pk.
                fields = [f for f in fields if not isinstance(f, AutoField)]
                record_exists = False
            update_pk = bool(meta.has_auto_field and not pk_set)
            result = manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw)
            if update_pk:
                setattr(self, meta.pk.attname, result)
        transaction.commit_unless_managed(using=using)
    # Store the database on which the object was saved
    self._state.db = using
    # Once saved, this is no longer a to-be-added instance.
    self._state.adding = False
    # Signal that the save is complete
    if origin and not meta.auto_created:
        signals.post_save.send(sender=origin, instance=self, created=(not record_exists),
                               update_fields=update_fields, raw=raw, using=using)
save_base.alters_data = True
def delete(self, using=None):
    """Delete this instance's row (and cascading relations) from the DB."""
    using = using or router.db_for_write(self.__class__, instance=self)
    assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
    # Collector walks the relation graph and issues the deletes.
    cascade_collector = Collector(using=using)
    cascade_collector.collect([self])
    cascade_collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
    """Return the human-readable choice label for *field*'s current value
    (the raw value itself when it has no matching choice)."""
    raw = getattr(self, field.attname)
    display = dict(field.flatchoices).get(raw, raw)
    return force_unicode(display, strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
    """Return the neighbouring object ordered by *field* (pk breaks ties)."""
    if not self.pk:
        raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
    op = 'gt' if is_next else 'lt'
    order = '' if is_next else '-'
    param = smart_str(getattr(self, field.attname))
    # Match rows strictly beyond this one on the field, or equal on the
    # field with the pk breaking the tie -- a stable total ordering even
    # with duplicate field values.
    q = Q(**{'%s__%s' % (field.name, op): param})
    q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
    qs = self.__class__._default_manager.using(self._state.db).filter(
        **kwargs).filter(q).order_by('%s%s' % (order, field.name),
                                     '%spk' % order)
    try:
        return qs[0]
    except IndexError:
        raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
    """
    Return the neighbouring object in the explicit _order sequence of an
    order_with_respect_to model. The result is cached per direction on
    the instance.
    """
    # Cache attribute name embeds the boolean direction (True/False).
    cachename = "__%s_order_cache" % is_next
    if not hasattr(self, cachename):
        op = is_next and 'gt' or 'lt'
        order = not is_next and '-_order' or '_order'
        order_field = self._meta.order_with_respect_to
        # Restrict to peers sharing the same "respect to" value, then
        # compare _order against this row's _order via a subquery on pk.
        obj = self._default_manager.filter(**{
            order_field.name: getattr(self, order_field.attname)
        }).filter(**{
            '_order__%s' % op: self._default_manager.values('_order').filter(**{
                self._meta.pk.name: self.pk
            })
        }).order_by(order)[:1].get()
        setattr(self, cachename, obj)
    return getattr(self, cachename)
def prepare_database_save(self, unused):
    """When this instance is assigned to a related field being saved,
    persist its primary-key value rather than the object itself."""
    pk_value = self.pk
    return pk_value
def clean(self):
    """
    Model-wide validation hook, run after clean() has been called on
    every field by self.clean_fields. A ValidationError raised here is
    not tied to any particular field; it is associated with the special
    NON_FIELD_ERRORS key instead. The default implementation is a no-op.
    """
def validate_unique(self, exclude=None):
    """
    Checks unique constraints on the model and raises ``ValidationError``
    if any failed; *exclude* names fields to leave out of the checks.
    """
    unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
    errors = self._perform_unique_checks(unique_checks)
    # Merge the date-based violations into the same error dict.
    for key, messages in self._perform_date_checks(date_checks).items():
        errors.setdefault(key, []).extend(messages)
    if errors:
        raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
    """
    Gather a list of checks to perform. Since validate_unique could be
    called from a ModelForm, some fields may have been excluded; we can't
    perform a unique check on a model that is missing fields involved
    in that check.

    Fields that did not validate should also be excluded, but they need
    to be passed in via the exclude argument.

    Returns a (unique_checks, date_checks) pair:
      * unique_checks: [(model_class, (field_name, ...)), ...] covering
        unique_together tuples and unique=True fields.
      * date_checks: [(model_class, lookup_type, field_name,
        date_field_name), ...] for unique_for_date/year/month.
    """
    if exclude is None:
        exclude = []
    unique_checks = []
    # unique_together is collected from this class and every parent.
    unique_togethers = [(self.__class__, self._meta.unique_together)]
    for parent_class in self._meta.parents.keys():
        if parent_class._meta.unique_together:
            unique_togethers.append((parent_class, parent_class._meta.unique_together))
    for model_class, unique_together in unique_togethers:
        for check in unique_together:
            for name in check:
                # If this is an excluded field, don't add this check.
                if name in exclude:
                    break
            else:
                # for/else: only reached when no field of the check was
                # excluded by the break above.
                unique_checks.append((model_class, tuple(check)))
    # These are checks for the unique_for_<date/year/month>.
    date_checks = []
    # Gather a list of checks for fields declared as unique and add them to
    # the list of checks.
    fields_with_class = [(self.__class__, self._meta.local_fields)]
    for parent_class in self._meta.parents.keys():
        fields_with_class.append((parent_class, parent_class._meta.local_fields))
    for model_class, fields in fields_with_class:
        for f in fields:
            name = f.name
            if name in exclude:
                continue
            if f.unique:
                unique_checks.append((model_class, (name,)))
            if f.unique_for_date and f.unique_for_date not in exclude:
                date_checks.append((model_class, 'date', name, f.unique_for_date))
            if f.unique_for_year and f.unique_for_year not in exclude:
                date_checks.append((model_class, 'year', name, f.unique_for_year))
            if f.unique_for_month and f.unique_for_month not in exclude:
                date_checks.append((model_class, 'month', name, f.unique_for_month))
    return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
    """
    Run the (model_class, field_names) checks from _get_unique_checks()
    against the database; return {field_name_or_NON_FIELD_ERRORS:
    [messages]} for every violated constraint.
    """
    errors = {}
    for model_class, unique_check in unique_checks:
        # Try to look up an existing object with the same values as this
        # object's values for all the unique field.
        lookup_kwargs = {}
        for field_name in unique_check:
            f = self._meta.get_field(field_name)
            lookup_value = getattr(self, f.attname)
            if lookup_value is None:
                # no value, skip the lookup
                continue
            if f.primary_key and not self._state.adding:
                # no need to check for unique primary key when editing
                continue
            lookup_kwargs[str(field_name)] = lookup_value
        # some fields were skipped, no reason to do the check
        if len(unique_check) != len(lookup_kwargs.keys()):
            continue
        qs = model_class._default_manager.filter(**lookup_kwargs)
        # Exclude the current object from the query if we are editing an
        # instance (as opposed to creating a new one)
        # Note that we need to use the pk as defined by model_class, not
        # self.pk. These can be different fields because model inheritance
        # allows single model to have effectively multiple primary keys.
        # Refs #17615.
        model_class_pk = self._get_pk_val(model_class._meta)
        if not self._state.adding and model_class_pk is not None:
            qs = qs.exclude(pk=model_class_pk)
        if qs.exists():
            if len(unique_check) == 1:
                # Single-field constraint: report against that field.
                key = unique_check[0]
            else:
                # unique_together: report as a non-field error.
                key = NON_FIELD_ERRORS
            errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
    return errors
def _perform_date_checks(self, date_checks):
    """
    Enforce unique_for_date/year/month constraints; return a
    {field_name: [messages]} dict of violations.
    """
    errors = {}
    for model_class, lookup_type, field, unique_for in date_checks:
        lookup_kwargs = {}
        # there's a ticket to add a date lookup, we can remove this special
        # case if that makes it's way in
        date = getattr(self, unique_for)
        if date is None:
            # No date to compare against -- the constraint cannot apply.
            continue
        if lookup_type == 'date':
            lookup_kwargs['%s__day' % unique_for] = date.day
            lookup_kwargs['%s__month' % unique_for] = date.month
            lookup_kwargs['%s__year' % unique_for] = date.year
        else:
            # 'year' or 'month': compare only that component.
            lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
        lookup_kwargs[field] = getattr(self, field)
        qs = model_class._default_manager.filter(**lookup_kwargs)
        # Exclude the current object from the query if we are editing an
        # instance (as opposed to creating a new one)
        if not self._state.adding and self.pk is not None:
            qs = qs.exclude(pk=self.pk)
        if qs.exists():
            errors.setdefault(field, []).append(
                self.date_error_message(lookup_type, field, unique_for)
            )
    return errors
def date_error_message(self, lookup_type, field, unique_for):
    """Build the translated unique_for_date/year/month violation message."""
    opts = self._meta
    return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
        'field_name': unicode(capfirst(opts.get_field(field).verbose_name)),
        'date_field': unicode(capfirst(opts.get_field(unique_for).verbose_name)),
        'lookup': lookup_type,
    }
def unique_error_message(self, model_class, unique_check):
    """
    Build the translated "already exists" message for a failed unique
    check, for either a single unique field or a unique_together tuple.
    """
    opts = model_class._meta
    model_name = capfirst(opts.verbose_name)
    # A unique field
    if len(unique_check) == 1:
        field_name = unique_check[0]
        field = opts.get_field(field_name)
        field_label = capfirst(field.verbose_name)
        # Insert the error into the error dict, very sneaky
        return field.error_messages['unique'] % {
            'model_name': unicode(model_name),
            'field_label': unicode(field_label)
        }
    # unique_together
    else:
        # Join the labels into a human list, e.g. "a, b and c".
        field_labels = map(lambda f: capfirst(opts.get_field(f).verbose_name), unique_check)
        field_labels = get_text_list(field_labels, _('and'))
        return _("%(model_name)s with this %(field_label)s already exists.") % {
            'model_name': unicode(model_name),
            'field_label': unicode(field_labels)
        }
def full_clean(self, exclude=None):
    """
    Calls clean_fields, clean, and validate_unique, on the model,
    and raises a ``ValidationError`` for any errors that occurred.

    :param exclude: iterable of field names to skip during validation.
        The caller's iterable is never mutated.
    :raises ValidationError: aggregating errors from all three phases.
    """
    errors = {}
    if exclude is None:
        exclude = []
    else:
        # Work on a copy: names of fields that fail validation are
        # appended below, and that must not leak into the caller's list.
        exclude = list(exclude)
    try:
        self.clean_fields(exclude=exclude)
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Form.clean() is run even if other validation fails, so do the
    # same with Model.clean() for consistency.
    try:
        self.clean()
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Run unique checks, but only for fields that passed validation.
    for name in errors.keys():
        if name != NON_FIELD_ERRORS and name not in exclude:
            exclude.append(name)
    try:
        self.validate_unique(exclude=exclude)
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    if errors:
        raise ValidationError(errors)
def clean_fields(self, exclude=None):
    """
    Cleans all fields and raises a ValidationError containing message_dict
    of all validation errors if any occur.

    :param exclude: iterable of field names to skip entirely.
    """
    if exclude is None:
        exclude = []
    errors = {}
    for f in self._meta.fields:
        if f.name in exclude:
            continue
        # Skip validation for empty fields with blank=True. The developer
        # is responsible for making sure they have a valid value.
        raw_value = getattr(self, f.attname)
        if f.blank and raw_value in validators.EMPTY_VALUES:
            continue
        try:
            # Field.clean() validates and normalises; the cleaned value
            # is written back onto the instance.
            setattr(self, f.attname, f.clean(raw_value, self))
        except ValidationError as e:
            errors[f.name] = e.messages
    if errors:
        raise ValidationError(errors)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
    """
    Curried onto models (see section header) as set_<related>_order():
    rewrite the _order column of this object's related *ordered_obj*
    rows so they follow the pk sequence given in *id_list*.
    """
    if using is None:
        using = DEFAULT_DB_ALIAS
    # Value of the order_with_respect_to relation that scopes the rows.
    rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
    order_name = ordered_obj._meta.order_with_respect_to.name
    # FIXME: It would be nice if there was an "update many" version of update
    # for situations like this.
    for i, j in enumerate(id_list):
        ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
    transaction.commit_unless_managed(using=using)
def method_get_order(ordered_obj, self):
    """
    Curried onto models (see section header) as get_<related>_order():
    return the primary keys of this object's related *ordered_obj* rows.
    NOTE(review): no explicit order_by here -- relies on the related
    model's default ordering; confirm before depending on the sequence.
    """
    rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
    order_name = ordered_obj._meta.order_with_respect_to.name
    pk_name = ordered_obj._meta.pk.name
    return [r[pk_name] for r in
            ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
    # Honour settings.ABSOLUTE_URL_OVERRIDES (keyed "app_label.module_name")
    # before falling back to the model's own get_absolute_url (*func*).
    return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
    # Bare, attribute-less placeholder class.
    # NOTE(review): no uses are visible in this chunk -- presumably an
    # instance factory for copy/proxy tricks; confirm against callers.
    pass
def simple_class_factory(model, attrs):
    """Used to unpickle Models without deferred fields.

    The stored class is usable as-is, so it is simply handed back.
    We need to do this the hard way, rather than just using
    the default __reduce__ implementation, because of a
    __deepcopy__ problem in Python 2.4
    """
    return model
def model_unpickle(model, attrs, factory):
    """
    Used to unpickle Model subclasses with deferred fields: build the
    concrete class via *factory*, then allocate a blank instance of it
    (pickle restores the state separately).
    """
    concrete_cls = factory(model, attrs)
    return concrete_cls.__new__(concrete_cls)
model_unpickle.__safe_for_unpickle__ = True
def subclass_exception(name, parents, module):
    """Dynamically create an exception subclass bound to *module*."""
    namespace = {'__module__': module}
    return type(name, parents, namespace)
|
analurandis/Tur | refs/heads/master | backend/venv/Lib/site-packages/paste/script/default_sysconfig.py | 6 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
This module contains default sysconfig settings.
The command object is inserted into this module as a global variable
``paste_command``, and can be used inside functions.
"""
def add_custom_options(parser):
    """
    Hook allowing a sysconfig file to modify the ``parser`` object (an
    ``optparse.OptionParser`` instance) and so add new options to the
    command. The default implementation adds nothing.
    """
def default_config_filename(installer):
    """
    Suggest a default filename or directory for the configuration file
    when none was given explicitly; None means no preference (the first
    non-None value across sysconfig files wins). This default honours
    ``installer.default_config_filename``; implementations should also
    pay attention to ``installer.expect_config_directory``.
    """
    suggestion = installer.default_config_filename
    return suggestion
def install_variables(installer):
    """
    Returns a dictionary of variables for use later in the process
    (e.g., filling a configuration file); results from all sysconfig
    files are combined. The default contributes nothing.
    """
    return {}
def post_setup_hook(installer, config_file):
    """
    Called at the very end of ``paster setup-app`` -- e.g. to register
    an application globally. The default implementation does nothing.
    """
|
barraponto/python-steamtagger | refs/heads/master | src/steamtagger/__main__.py | 5 | import sys
import click
# Why does this file exist, and why __main__?
# For more info, read:
# - https://www.python.org/dev/peps/pep-0338/
# - https://docs.python.org/2/using/cmdline.html#cmdoption-m
# - https://docs.python.org/3/using/cmdline.html#cmdoption-m
@click.command()
@click.argument('names', nargs=-1)
def main(names):
    # CLI entry point: NAMES is a (possibly empty) tuple of positional
    # arguments; echo its repr back to stdout.
    # (Intentionally no docstring -- click would surface it as help text.)
    click.echo(repr(names))

if __name__ == "__main__":
    # Propagate click's return value as the process exit status.
    sys.exit(main())
|
tensorflow/tensorboard | refs/heads/master | tensorboard/summary/v1.py | 1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Central API entry point for v1 versions of summary operations.
This module simply offers a shorter way to access the members of modules
like `tensorboard.plugins.scalar.summary`.
"""
from tensorboard.plugins.audio import summary as _audio_summary
from tensorboard.plugins.custom_scalar import summary as _custom_scalar_summary
from tensorboard.plugins.histogram import summary as _histogram_summary
from tensorboard.plugins.image import summary as _image_summary
from tensorboard.plugins.pr_curve import summary as _pr_curve_summary
from tensorboard.plugins.scalar import summary as _scalar_summary
from tensorboard.plugins.text import summary as _text_summary
# Aliases re-exporting each plugin's summary module members (`op` and
# `pb`, plus the pr_curve extras) under shorter names, per the module
# docstring above.

# Audio plugin.
audio = _audio_summary.op
audio_pb = _audio_summary.pb
# Custom-scalar plugin.
custom_scalar = _custom_scalar_summary.op
custom_scalar_pb = _custom_scalar_summary.pb
# Histogram plugin.
histogram = _histogram_summary.op
histogram_pb = _histogram_summary.pb
# Image plugin.
image = _image_summary.op
image_pb = _image_summary.pb
# PR-curve plugin, including its streaming and raw-data variants.
pr_curve = _pr_curve_summary.op
pr_curve_pb = _pr_curve_summary.pb
pr_curve_streaming_op = _pr_curve_summary.streaming_op
pr_curve_raw_data_op = _pr_curve_summary.raw_data_op
pr_curve_raw_data_pb = _pr_curve_summary.raw_data_pb
# Scalar plugin.
scalar = _scalar_summary.op
scalar_pb = _scalar_summary.pb
# Text plugin.
text = _text_summary.op
text_pb = _text_summary.pb
|
science09/xhtml2pdf | refs/heads/master | demo/cherrypy/demo-cherrypy.py | 154 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
#############################################
## (C)opyright by Dirk Holtwick, 2008 ##
## All rights reserved ##
#############################################
import cherrypy as cp
import sx.pisa3 as pisa
import cStringIO as StringIO
# kid templating is optional: fall back to the inline HTML form when it
# is not installed. Catch only ImportError -- the previous bare except
# also swallowed SystemExit/KeyboardInterrupt and hid real errors
# raised inside kid itself.
try:
    import kid
except ImportError:
    kid = None
class PDFDemo(object):
    """
    Simple demo showing a form where you can enter some HTML code.
    After sending PISA is used to convert HTML to PDF and publish
    it directly.
    """

    @cp.expose
    def index(self):
        # Serve the kid template file when kid is installed, otherwise
        # an inline fallback form.
        # NOTE(review): file() is the Python 2 builtin; the handle is
        # never closed explicitly.
        if kid:
            return file("demo-cherrypy.html","r").read()
        return """
<html><body>
Please enter some HTML code:
<form action="download" method="post" enctype="multipart/form-data">
<textarea name="data">Hello <strong>World</strong></textarea>
<br />
<input type="submit" value="Convert HTML to PDF" />
</form>
</body></html>
"""

    @cp.expose
    def download(self, data):
        # With kid available, round-trip the submitted HTML through a
        # kid template to validate/normalise it as XHTML first.
        if kid:
            data = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://purl.org/kid/ns#">
<head>
<title>PDF Demo</title>
</head>
<body>%s</body>
</html>""" % data
            test = kid.Template(source=data)
            data = test.serialize(output='xhtml')
        # Render the (X)HTML into an in-memory PDF buffer via pisa.
        result = StringIO.StringIO()
        pdf = pisa.CreatePDF(
            StringIO.StringIO(data),
            result
        )
        if pdf.err:
            return "We had some errors in HTML"
        else:
            # Serve the generated bytes directly as a PDF response.
            cp.response.headers["content-type"] = "application/pdf"
            return result.getvalue()
# Mount the demo application at the site root.
cp.tree.mount(PDFDemo())

if __name__ == '__main__':
    import os.path
    # Load "<this script>.conf" from alongside the script.
    # NOTE(review): os.path.join() is called with a single argument, so
    # it is effectively a no-op wrapper here.
    cp.config.update(os.path.join(__file__.replace(".py", ".conf")))
    cp.server.quickstart()
    cp.engine.start()
|
pilliq/mongo-web-shell | refs/heads/master | webapps/trymongo/app.py | 2 | # Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from webapps.lib.log import configure_logging
from webapps.lib.conf import update_config
from webapps.trymongo.views import trymongo
def create_app():
    """Build and configure the Flask application for the trymongo webapp.

    Configuration comes from webapps.configs.trymongo, then environment
    variables (TRY_* via update_config); logging and the blueprint are
    wired up before the app is returned.
    """
    app = Flask(__name__)
    app.config.from_object('webapps.configs.trymongo')
    # Overrides the config with any environment variables that might
    # be set
    update_config(app, 'TRY')
    configure_logging(app)
    app.register_blueprint(trymongo)
    return app

# Module-level WSGI application object.
app = create_app()
|
mbrukman/flocker | refs/heads/master | flocker/acceptance/integration/testtools.py | 6 | # Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Testing infrastructure for integration tests.
"""
from twisted.trial.unittest import TestCase
from ..testtools import require_cluster, create_dataset
from ...testtools import random_name
def make_dataset_integration_testcase(image_name, volume_path, internal_port,
                                      insert_data, assert_inserted):
    """
    Create a ``TestCase`` that tests a particular container can
    successfully use Flocker datasets as volumes.

    :param unicode image_name: The image to run.
    :param FilePath volume_path: The path within the container where a
        volume should be mounted.
    :param int internal_port: The port the container listens on.
    :param insert_data: Callable that given test instance, host and port,
        connects using an appropriate client and inserts some
        data. Should return ``Deferred`` that fires on success.
    :param assert_inserted: Callable that given test instance, host and
        port asserts that data was inserted by ``insert_data``. Should
        return ``Deferred`` that fires on success.

    :return: ``TestCase`` subclass.
    """
    class IntegrationTests(TestCase):
        """
        Test that the given application can start and restart with Flocker
        datasets as volumes.
        """
        def _start_container(self, name, dataset_id, external_port, cluster,
                             cleanup=True):
            """
            Start a container with a volume.

            :param unicode name: The container name.
            :param UUID dataset_id: The dataset ID.
            :param cluster: The ``Cluster``.
            :param int external_port: External port to expose on the container.
            :param bool cleanup: If true, delete container when test is over.

            :return: ``Deferred`` that fires when the container has been
                started.
            """
            # restart_policy "never": a crashed container should fail
            # the test rather than be restarted silently.
            app = {
                u"name": name,
                u"node_uuid": cluster.nodes[0].uuid,
                u"image": image_name,
                u"ports": [{u"internal": internal_port,
                            u"external": external_port}],
                u'restart_policy': {u'name': u'never'},
                u"volumes": [{u"dataset_id": unicode(dataset_id),
                              u"mountpoint": volume_path.path}],
            }
            created = cluster.create_container(app)
            if cleanup:
                created.addCallback(lambda _: self.addCleanup(
                    cluster.remove_container, name))
            return created

        @require_cluster(1)
        def test_start(self, cluster):
            """
            The specified application can be started with a Docker dataset
            configured as its volume.

            This ensures a newly created dataset meets the requirements of
            the application being tested. For example, some Docker
            containers can require a completely empty volume, or one that
            is writeable by non-root users, etc..
            """
            host = cluster.nodes[0].public_address
            port = 12345
            # Deferred chain: create dataset -> start container ->
            # insert data -> assert it is readable back.
            creating_dataset = create_dataset(self, cluster)
            creating_dataset.addCallback(
                lambda dataset: self._start_container(random_name(self),
                                                      dataset.dataset_id,
                                                      port, cluster))
            creating_dataset.addCallback(
                lambda _: insert_data(self, host, port))
            creating_dataset.addCallback(
                lambda _: assert_inserted(self, host, port))
            return creating_dataset

        @require_cluster(1)
        def test_restart(self, cluster):
            """
            The specified application can be started with a Docker dataset
            configured as its volume that has already been used by the
            same application previously.
            """
            host = cluster.nodes[0].public_address
            port = 12345
            another_port = 12366
            first_container = random_name(self)
            creating_dataset = create_dataset(self, cluster)

            def created(dataset):
                # cleanup=False: this container is removed explicitly
                # mid-test so the dataset can be re-attached to a second
                # container on a different port.
                started = self._start_container(first_container,
                                                dataset.dataset_id,
                                                port, cluster, cleanup=False)
                started.addCallback(
                    lambda _: insert_data(self, host, port))
                restarting = started.addCallback(
                    lambda _: cluster.remove_container(first_container))
                restarting.addCallback(
                    lambda _: self._start_container(random_name(self),
                                                    dataset.dataset_id,
                                                    another_port, cluster))
                return restarting
            creating_dataset.addCallback(created)
            # Data written via the first container must survive into
            # the second one.
            creating_dataset.addCallback(
                lambda _: assert_inserted(self, host, another_port))
            return creating_dataset
    return IntegrationTests
|
eric-stanley/selenium | refs/heads/master | py/selenium/webdriver/opera/webdriver.py | 71 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import os
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.chrome.webdriver import WebDriver as ChromiumDriver
from .options import Options
class OperaDriver(ChromiumDriver):
    """Controls the new OperaDriver and allows you
    to drive the Opera browser based on Chromium."""

    def __init__(self, executable_path=None, port=0,
                 opera_options=None, service_args=None,
                 desired_capabilities=None, service_log_path=None):
        """
        Creates a new instance of the operadriver.

        Starts the service and then creates new instance of operadriver.

        :Args:
         - executable_path - path to the executable. If the default is used
                             it assumes the executable is in the $PATH
         - port - port you would like the service to run, if left as 0,
                  a free port will be found.
         - desired_capabilities: Dictionary object with non-browser specific
           capabilities only, such as "proxy" or "loggingPref".
         - opera_options: Opera-flavoured options instance; forwarded to
           the Chromium base driver as its chrome_options.
        """
        # Fall back to finding "operadriver" on $PATH.
        executable_path = (executable_path if executable_path is not None
                           else "operadriver")
        ChromiumDriver.__init__(self,
                                executable_path=executable_path,
                                port=port,
                                chrome_options=opera_options,
                                service_args=service_args,
                                desired_capabilities=desired_capabilities,
                                service_log_path=service_log_path)

    def create_options(self):
        # Hook called by the Chromium base class: supply the
        # Opera-specific Options type instead of Chrome's.
        return Options()
class WebDriver(OperaDriver):
    """Public Opera WebDriver entry point (thin wrapper over OperaDriver)."""

    # NOTE(review): only the Chromium-based service type is defined here;
    # presumably retained for compatibility with older API -- confirm.
    class ServiceType:
        CHROMIUM = 2

    def __init__(self,
                 desired_capabilities=None,
                 executable_path=None,
                 port=0,
                 service_log_path=None,
                 service_args=None,
                 opera_options=None):
        """Create a Chromium-based Opera driver; see OperaDriver.__init__
        for the meaning of each argument."""
        OperaDriver.__init__(self, executable_path=executable_path,
                             port=port, opera_options=opera_options,
                             service_args=service_args,
                             desired_capabilities=desired_capabilities,
                             service_log_path=service_log_path)
|
mfnch/pyrtist | refs/heads/master | pyrtist/win_setup.py | 1 | # Copyright (C) 2017 Matteo Franchin
#
# This file is part of Pyrtist.
# Pyrtist is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# Pyrtist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrtist. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
# example basictreeview.py
import pygtk
pygtk.require('2.0')
import gtk
import gui.config as config
from gui.boxer import debug
spacing = 6
class ConfigTab(object):
  """Placeholder for a per-option configuration tab; currently carries no state."""
  def __init__(self):
    pass
class BoxerWindowSettings(object):
  """Boxer configuration editor window.

  Layout: a horizontal pane with a section/option tree on the left and an
  editor (description label + text entry + read-only text view) on the
  right, plus Ok/Cancel buttons at the bottom.  Edits are accumulated in
  ``self.option_changed_vals`` ({section: {option: value}}) and are written
  back to the configuration only when Ok is pressed.
  """

  def __init__(self, size=(600, 400)):
    """Build the widget hierarchy, populate the tree and show the window.

    size: initial (width, height) request for the window, in pixels.
    """
    # Create a new window
    self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    self.window.set_border_width(spacing)
    self.window.set_title("Boxer configuration editor")
    self.window.set_size_request(*size)
    self.window.connect("delete_event", self._on_delete_event)

    # The window has one top main region and a bottom region where the
    # ok/cancel buttons are
    self.window_versplit = gtk.VBox()
    self.window.add(self.window_versplit)

    # The window is split horizontally into two parts:
    # - on the left we have the zone where we can select the setting to change
    # - on the right we can actually manipulate the setting
    self.window_horsplit = gtk.HPaned()
    self.window_versplit.pack_start(self.window_horsplit, expand=True,
                                    fill=True, padding=0)

    self.window_button_ok = gtk.Button(label="_Ok")
    self.window_button_cancel = gtk.Button(label="_Cancel")
    self.window_butbox = gtk.HButtonBox()
    self.window_butbox.add(self.window_button_ok)
    self.window_butbox.add(self.window_button_cancel)
    self.window_butbox.set_layout(gtk.BUTTONBOX_END)
    self.window_butbox.set_spacing(spacing)
    self.window_versplit.pack_start(self.window_butbox, expand=False,
                                    fill=False, padding=0)

    # We first define the right part, which is split vertically in two
    self.window_versplit2 = gtk.VBox(False, 4)
    self.window_horsplit.pack2(self.window_versplit2)

    # RIGHT PART: In the upper part we have a text entry, which is initially
    # filled with the current setting and the user can edit in order to change
    # it.
    self.window_label = wd = gtk.Label("No option selected.")
    wd.set_single_line_mode(False)
    wd.set_line_wrap(True)
    wd.set_alignment(0, 0)
    self.window_versplit2.pack_start(wd, expand=False, fill=False, padding=0)
    self.window_entry = gtk.Entry()
    self.window_versplit2.pack_start(self.window_entry, expand=False,
                                     fill=False, padding=0)
    self.window_textview = gtk.TextView()
    self.window_textview.set_editable(False)
    self.window_textview.set_cursor_visible(False)
    self.window_textview.set_wrap_mode(gtk.WRAP_NONE)
    self.window_versplit2.pack_start(self.window_textview, expand=True,
                                     fill=True, padding=4)

    # create a TreeStore with one string column to use as the model
    self.treestore = gtk.TreeStore(str)

    # Populate the tree: one top-level row per section, one child per option.
    self.config = cfg = config.get_configuration()
    sections = cfg.get_sections()
    for section in sections:
      piter = self.treestore.append(None, [section])
      options = cfg.get_options(section)
      for option in options:
        self.treestore.append(piter, [option])

    # create the TreeView using treestore
    self.treeview = gtk.TreeView(self.treestore)

    # create the TreeViewColumn to display the data
    self.tvcolumn = gtk.TreeViewColumn('Available settings')

    # add tvcolumn to treeview
    self.treeview.append_column(self.tvcolumn)

    # create a CellRendererText to render the data
    self.cell = gtk.CellRendererText()

    # add the cell to the tvcolumn and allow it to expand
    self.tvcolumn.pack_start(self.cell, True)

    # set the cell "text" attribute to column 0 - retrieve text
    # from that column in treestore
    self.tvcolumn.add_attribute(self.cell, 'text', 0)

    self.treeview.set_search_column(0)   # Make it searchable
    self.tvcolumn.set_sort_column_id(0)  # Allow sorting on the column
    self.treeview.set_reorderable(True)  # Allow drag and drop reordering of rows
    self.treeview.connect("row-activated", self._on_row_activated)
    self.window_button_ok.connect("button-press-event",
                                  self._on_button_ok_press)
    self.window_button_cancel.connect("button-press-event",
                                      self._on_button_cancel_press)

    # Insert objects one inside the other
    self.window_scrolledwin = scrolledwin = gtk.ScrolledWindow()
    scrolledwin.set_size_request(int(size[0]/3), -1)
    scrolledwin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scrolledwin.add(self.treeview)
    self.window_horsplit.pack1(scrolledwin)

    self.window.show_all()

    # Editor state: which option is currently shown, its value when it was
    # loaded, and all pending (not yet applied) user changes.
    self.option_section = None
    self.option_name = None
    self.option_initial_value = None
    self.option_changed_vals = {}

  def quit(self):
    """Hide the window and leave the GTK main loop."""
    self.window.hide()
    gtk.main_quit()

  def _on_delete_event(self, widget, event, data=None):
    # Window-manager close: quit the main loop; returning False lets GTK
    # proceed with destroying the window.
    gtk.main_quit()
    return False

  def _on_button_ok_press(self, *args):
    """Apply all pending changes, persist them and close the window."""
    self._apply_changes()
    self.config.save_configuration()
    self.quit()

  def _on_button_cancel_press(self, *args):
    """Discard pending changes and close the window."""
    self.quit()

  def _on_row_activated(self, treeview, path, view_column):
    """Load the activated option into the editor (ignores section rows)."""
    selection = treeview.get_selection()
    ts, ti = selection.get_selected()
    parent_iter = ts.iter_parent(ti)
    if parent_iter:
      # Only child rows (options) have a parent (their section).
      section = ts.get_value(parent_iter, 0)
      option = ts.get_value(ti, 0)
      self._show_new_tab(section, option)

  def _save_tab_settings(self):
    """Record the entry's current text as a pending change, if it differs
    from the value the editor was loaded with."""
    entry = self.window_entry
    initial_val = self.option_initial_value
    # BUGFIX: use identity comparison with None (was ``!= None``).
    if initial_val is not None:
      cur_val = entry.get_text()
      if cur_val != initial_val:
        sect = self.option_changed_vals.setdefault(self.option_section, {})
        sect[self.option_name] = cur_val

  def _show_new_tab(self, section, option):
    """Save the current editor state, then display ``section.option``."""
    self._save_tab_settings()
    desc = self.config.get_desc(section, option)
    range_str = desc.get_range()
    # Prefer a pending (unsaved) value over the stored configuration value.
    value = self.option_changed_vals.get(section, {}).get(option, None)
    # BUGFIX: use identity comparison with None (was ``== None``).
    if value is None:
      value = self.config.get(section, option)
      value = str(desc.get(value))
    entry = self.window_entry
    label = self.window_label
    opt_str = "%s.%s: %s" % (section, option, range_str)
    label.set_text("%s\n%s" % (opt_str, desc))
    entry.set_text(value)
    entry.set_tooltip_text(range_str)
    self.option_section = section
    self.option_name = option
    self.option_initial_value = value

  def _apply_changes(self):
    """Push every pending change into the configuration object."""
    self._save_tab_settings()
    for section, section_dict in self.option_changed_vals.items():
      for option, val in section_dict.items():
        desc = self.config.get_desc(section, option)
        self.config.set(section, option, str(desc.set(val)))
def main():
  """Enter the GTK main loop (blocks until gtk.main_quit is called)."""
  gtk.main()

if __name__ == "__main__":
  # Keep a reference to the window so it is not garbage collected
  # while the main loop runs.
  tmp = BoxerWindowSettings()
  main()
|
40223202/test | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/copy.py | 628 | """Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copyreg import dispatch_table
import builtins
class Error(Exception):
    """Raised when an object cannot be (shallow- or deep-)copied."""
    pass
error = Error   # backward compatibility
# module org.python.core does not exist in Brython, so lets just ignore
# this import request.
#try:
# from org.python.core import PyStringMap
#except ImportError:
# PyStringMap = None
# Jython-only type; never available under Brython, so keep it disabled.
PyStringMap = None

__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
    """Shallow copy operation on arbitrary Python objects.

    Dispatch order: the type-keyed ``_copy_dispatch`` table, a
    ``__copy__`` method on the class, a ``copyreg`` reductor,
    ``__reduce_ex__``, and finally ``__reduce__``.  A reduce value is
    replayed through ``_reconstruct``.  See the module's __doc__ string
    for more info.
    """
    cls = type(x)

    handler = _copy_dispatch.get(cls)
    if handler:
        return handler(x)

    handler = getattr(cls, "__copy__", None)
    if handler:
        return handler(x)

    reduce_fn = dispatch_table.get(cls)
    if reduce_fn:
        rv = reduce_fn(x)
    else:
        reduce_fn = getattr(x, "__reduce_ex__", None)
        if reduce_fn:
            rv = reduce_fn(2)
        else:
            reduce_fn = getattr(x, "__reduce__", None)
            if not reduce_fn:
                raise Error("un(shallow)copyable object of type %s" % cls)
            rv = reduce_fn()

    return _reconstruct(x, rv, 0)
# Registry mapping a type to its shallow-copy implementation.  ``d`` is a
# short-lived alias used only while filling the table below.
_copy_dispatch = d = {}

def _copy_immutable(x):
    # Immutable objects are their own shallow copy.
    return x
for t in (type(None), int, float, bool, str, tuple,
          frozenset, type, range,
          types.BuiltinFunctionType, type(Ellipsis),
          types.FunctionType, weakref.ref):
    d[t] = _copy_immutable
# CodeType may be missing on some platforms (e.g. restricted builds).
t = getattr(types, "CodeType", None)
if t is not None:
    d[t] = _copy_immutable
# complex/unicode are looked up dynamically so this also works on
# interpreters that lack them.
for name in ("complex", "unicode"):
    t = getattr(builtins, name, None)
    if t is not None:
        d[t] = _copy_immutable

def _copy_with_constructor(x):
    # list/dict/set make a shallow copy when fed themselves.
    return type(x)(x)
for t in (list, dict, set):
    d[t] = _copy_with_constructor

def _copy_with_copy_method(x):
    return x.copy()
if PyStringMap is not None:
    d[PyStringMap] = _copy_with_copy_method

# Drop the setup alias so it does not leak as a module attribute.
del d
def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.

    memo maps id(original) -> copy and breaks cycles; _nil is a private
    unique sentinel (a deliberately shared mutable default) used to
    distinguish "not memoized yet" from a memoized None.

    See the module's __doc__ string for more info.
    """
    if memo is None:
        memo = {}

    d = id(x)
    y = memo.get(d, _nil)
    if y is not _nil:
        # Already copied during this pass (cycle or shared reference).
        return y

    cls = type(x)

    copier = _deepcopy_dispatch.get(cls)
    if copier:
        y = copier(x, memo)
    else:
        try:
            issc = issubclass(cls, type)
        except TypeError: # cls is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            # Classes (instances of type) are treated as atomic.
            y = _deepcopy_atomic(x, memo)
        else:
            copier = getattr(x, "__deepcopy__", None)
            if copier:
                y = copier(memo)
            else:
                # Fall back on the pickle reduce protocol.
                reductor = dispatch_table.get(cls)
                if reductor:
                    rv = reductor(x)
                else:
                    reductor = getattr(x, "__reduce_ex__", None)
                    if reductor:
                        rv = reductor(2)
                    else:
                        reductor = getattr(x, "__reduce__", None)
                        if reductor:
                            rv = reductor()
                        else:
                            raise Error(
                                "un(deep)copyable object of type %s" % cls)
                y = _reconstruct(x, rv, 1, memo)

    # If is its own copy, don't memoize.
    if y is not x:
        memo[d] = y
        _keep_alive(x, memo) # Make sure x lives at least as long as d
    return y
# Registry mapping a type to its deep-copy implementation; ``d`` is a
# short-lived alias used while populating it.
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    # Atomic/immutable values need no copying at all.
    return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
    # complex may be absent on some interpreters.
    d[complex] = _deepcopy_atomic
except NameError:
    pass
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
    # CodeType may be absent (restricted builds).
    d[types.CodeType] = _deepcopy_atomic
except AttributeError:
    pass
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
    """Deep-copy a list.

    The (still empty) result is registered in *memo* before any element is
    copied, so self-referential lists terminate instead of recursing forever.
    """
    result = []
    memo[id(x)] = result
    result.extend(deepcopy(item, memo) for item in x)
    return result

d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
    """Deep-copy a tuple, returning the original when no element changed."""
    y = []
    for a in x:
        y.append(deepcopy(a, memo))
    # We're not going to put the tuple in the memo, but it's still important we
    # check for it, in case the tuple contains recursive mutable structures.
    try:
        return memo[id(x)]
    except KeyError:
        pass
    # If every copied element is identical to the original one, the tuple is
    # effectively atomic: return x itself instead of building a new tuple.
    for i in range(len(x)):
        if x[i] is not y[i]:
            y = tuple(y)
            break
    else:
        y = x
    return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
    """Deep-copy a dict, memoizing the (empty) result first so cyclic
    dictionaries terminate; both keys and values are copied."""
    result = {}
    memo[id(x)] = result
    for key, value in x.items():
        copied_key = deepcopy(key, memo)
        result[copied_key] = deepcopy(value, memo)
    return result

d[dict] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo): # Copy instance methods
    # Rebind the original function to a deep copy of the bound instance.
    return type(x)(x.__func__, deepcopy(x.__self__, memo))
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _reconstruct(x, info, deep, memo=None):
    """Rebuild a copy of x from a pickle-reduce value.

    info is either a string (global name: x is treated as atomic and
    returned unchanged) or a 2-to-5 tuple:
    (callable, args[, state[, listiter[, dictiter]]]).
    deep selects deep (1) versus shallow (0) copying of args/state/items.
    """
    if isinstance(info, str):
        return x
    assert isinstance(info, tuple)
    if memo is None:
        memo = {}
    n = len(info)
    assert n in (2, 3, 4, 5)
    callable, args = info[:2]
    if n > 2:
        state = info[2]
    else:
        state = {}
    if n > 3:
        listiter = info[3]
    else:
        listiter = None
    if n > 4:
        dictiter = info[4]
    else:
        dictiter = None
    if deep:
        args = deepcopy(args, memo)
    y = callable(*args)
    # Memoize before restoring state so cycles back to x resolve to y.
    memo[id(x)] = y
    if state:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # Without __setstate__, state is either a __dict__ or a
            # (__dict__, slots) pair per the pickle protocol.
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.items():
                    setattr(y, key, value)
    # Replay list items and dict entries captured by the reductor.
    if listiter is not None:
        for item in listiter:
            if deep:
                item = deepcopy(item, memo)
            y.append(item)
    if dictiter is not None:
        for key, value in dictiter:
            if deep:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
            y[key] = value
    return y
# Drop setup-time names so they do not leak as module attributes.
del d
del types

# Helper for instance creation without calling __init__
class _EmptyClass:
    pass
|
javachengwc/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/test_discovery_sample2/tests.py | 114 | from django.test import TestCase
class Test(TestCase):
    """Minimal test case used as a fixture for test-discovery checks."""
    def test_sample(self):
        # Intentionally empty: only discovery of this test matters.
        pass
|
AlexanderSavelyev/rdkit | refs/heads/master | rdkit/Chem/Pharm3D/ExcludedVolume.py | 4 | # $Id$
#
# Copyright (C) 2004-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
class ExcludedVolume(object):
  """An excluded-volume constraint for 3D pharmacophore embedding.

  Attributes:
    index: integer id of this excluded volume (default -1).
    featInfo: shallow copy of the ([indices], min, max) triples supplied.
    exclusionDist: exclusion radius (default 3.0).
    pos: position; left as None until assigned externally.
  """

  def __init__(self, featInfo, index=-1, exclusionDist=3.0):
    """
    featInfo should be a sequence of ([indices],min,max) tuples

    Raises ValueError if featInfo is not a non-empty sequence whose
    elements are 3-sequences.
    """
    self.index = index
    try:
      # Probe for sequence-ness; len() raises TypeError on non-sequences.
      # BUGFIX: was ``except AttributeError``, which len() never raises,
      # so the intended ValueError could never be produced.
      len(featInfo)
    except TypeError:
      raise ValueError('featInfo argument must be a sequence of sequences')
    if not len(featInfo):
      raise ValueError('featInfo argument must non-empty')
    try:
      # Cheap shape check on the first element only.
      # BUGFIX: was ``except Type:`` (undefined name -> NameError while
      # handling the unpack failure); TypeError was clearly intended.
      a, b, c = featInfo[0]
    except TypeError:
      # first element is not iterable
      raise ValueError('featInfo elements must be 3-sequences')
    except ValueError:
      # wrong number of items to unpack
      raise ValueError('featInfo elements must be 3-sequences')

    # Shallow copy so later mutation of the caller's sequence has no effect.
    self.featInfo = featInfo[:]
    self.exclusionDist = exclusionDist
    self.pos = None
|
kaarl/pyload | refs/heads/stable | module/plugins/accounts/FastshareCz.py | 5 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.Account import Account
from module.plugins.internal.misc import set_cookie
class FastshareCz(Account):
    """Fastshare.cz account plugin: scrapes remaining credit and signs in."""
    __name__ = "FastshareCz"
    __type__ = "account"
    __version__ = "0.13"
    __status__ = "testing"

    __description__ = """Fastshare.cz account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
                   ("stickell", "l.stickell@yahoo.it")]

    #: Matches the "Credit" table cell on the user page.
    CREDIT_PATTERN = r'Credit\s*:\s*</td>\s*<td>(.+?)\s*<'

    def grab_info(self, user, password, data):
        """Scrape the user page; the account is premium iff credit remains."""
        info = {'validuntil': -1,  # -1: no expiry reported by the site
                'trafficleft': None,
                'premium': False}

        page = self.load("http://www.fastshare.cz/user")

        match = re.search(self.CREDIT_PATTERN, page)
        if match is not None:
            info['trafficleft'] = self.parse_traffic(match.group(1))
            info['premium'] = bool(info['trafficleft'])

        return info

    def signin(self, user, password, data):
        """Log in; calls fail_login() when credentials are rejected."""
        set_cookie(self.req.cj, "fastshare.cz", "lang", "en")

        self.load('http://www.fastshare.cz/login')  #@NOTE: Do not remove or it will not login

        response = self.load("https://www.fastshare.cz/sql.php",
                             post={'login': user,
                                   'heslo': password})

        if ">Wrong username or password" in response:
            self.fail_login()
|
davygeek/vitess | refs/heads/master | py/vtproto/tabletmanagerdata_pb2.py | 3 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tabletmanagerdata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import query_pb2 as query__pb2
import topodata_pb2 as topodata__pb2
import replicationdata_pb2 as replicationdata__pb2
import logutil_pb2 as logutil__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tabletmanagerdata.proto',
package='tabletmanagerdata',
syntax='proto3',
serialized_options=_b('Z.vitess.io/vitess/go/vt/proto/tabletmanagerdata'),
serialized_pb=_b('\n\x17tabletmanagerdata.proto\x12\x11tabletmanagerdata\x1a\x0bquery.proto\x1a\x0etopodata.proto\x1a\x15replicationdata.proto\x1a\rlogutil.proto\"\x93\x01\n\x0fTableDefinition\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06schema\x18\x02 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x03 \x03(\t\x12\x1b\n\x13primary_key_columns\x18\x04 \x03(\t\x12\x0c\n\x04type\x18\x05 \x01(\t\x12\x13\n\x0b\x64\x61ta_length\x18\x06 \x01(\x04\x12\x11\n\trow_count\x18\x07 \x01(\x04\"{\n\x10SchemaDefinition\x12\x17\n\x0f\x64\x61tabase_schema\x18\x01 \x01(\t\x12=\n\x11table_definitions\x18\x02 \x03(\x0b\x32\".tabletmanagerdata.TableDefinition\x12\x0f\n\x07version\x18\x03 \x01(\t\"\x8b\x01\n\x12SchemaChangeResult\x12:\n\rbefore_schema\x18\x01 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\x12\x39\n\x0c\x61\x66ter_schema\x18\x02 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\xc1\x01\n\x0eUserPermission\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\x12\x19\n\x11password_checksum\x18\x03 \x01(\x04\x12\x45\n\nprivileges\x18\x04 \x03(\x0b\x32\x31.tabletmanagerdata.UserPermission.PrivilegesEntry\x1a\x31\n\x0fPrivilegesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xae\x01\n\x0c\x44\x62Permission\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\n\n\x02\x64\x62\x18\x02 \x01(\t\x12\x0c\n\x04user\x18\x03 \x01(\t\x12\x43\n\nprivileges\x18\x04 \x03(\x0b\x32/.tabletmanagerdata.DbPermission.PrivilegesEntry\x1a\x31\n\x0fPrivilegesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x83\x01\n\x0bPermissions\x12;\n\x10user_permissions\x18\x01 \x03(\x0b\x32!.tabletmanagerdata.UserPermission\x12\x37\n\x0e\x64\x62_permissions\x18\x02 \x03(\x0b\x32\x1f.tabletmanagerdata.DbPermission\"\x1e\n\x0bPingRequest\x12\x0f\n\x07payload\x18\x01 \x01(\t\"\x1f\n\x0cPingResponse\x12\x0f\n\x07payload\x18\x01 \x01(\t\" \n\x0cSleepRequest\x12\x10\n\x08\x64uration\x18\x01 
\x01(\x03\"\x0f\n\rSleepResponse\"\xaf\x01\n\x12\x45xecuteHookRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nparameters\x18\x02 \x03(\t\x12\x46\n\textra_env\x18\x03 \x03(\x0b\x32\x33.tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry\x1a/\n\rExtraEnvEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"J\n\x13\x45xecuteHookResponse\x12\x13\n\x0b\x65xit_status\x18\x01 \x01(\x03\x12\x0e\n\x06stdout\x18\x02 \x01(\t\x12\x0e\n\x06stderr\x18\x03 \x01(\t\"Q\n\x10GetSchemaRequest\x12\x0e\n\x06tables\x18\x01 \x03(\t\x12\x15\n\rinclude_views\x18\x02 \x01(\x08\x12\x16\n\x0e\x65xclude_tables\x18\x03 \x03(\t\"S\n\x11GetSchemaResponse\x12>\n\x11schema_definition\x18\x01 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\x17\n\x15GetPermissionsRequest\"M\n\x16GetPermissionsResponse\x12\x33\n\x0bpermissions\x18\x01 \x01(\x0b\x32\x1e.tabletmanagerdata.Permissions\"\x14\n\x12SetReadOnlyRequest\"\x15\n\x13SetReadOnlyResponse\"\x15\n\x13SetReadWriteRequest\"\x16\n\x14SetReadWriteResponse\">\n\x11\x43hangeTypeRequest\x12)\n\x0btablet_type\x18\x01 \x01(\x0e\x32\x14.topodata.TabletType\"\x14\n\x12\x43hangeTypeResponse\"\x15\n\x13RefreshStateRequest\"\x16\n\x14RefreshStateResponse\"\x17\n\x15RunHealthCheckRequest\"\x18\n\x16RunHealthCheckResponse\"+\n\x18IgnoreHealthErrorRequest\x12\x0f\n\x07pattern\x18\x01 \x01(\t\"\x1b\n\x19IgnoreHealthErrorResponse\",\n\x13ReloadSchemaRequest\x12\x15\n\rwait_position\x18\x01 \x01(\t\"\x16\n\x14ReloadSchemaResponse\")\n\x16PreflightSchemaRequest\x12\x0f\n\x07\x63hanges\x18\x01 \x03(\t\"X\n\x17PreflightSchemaResponse\x12=\n\x0e\x63hange_results\x18\x01 \x03(\x0b\x32%.tabletmanagerdata.SchemaChangeResult\"\xc2\x01\n\x12\x41pplySchemaRequest\x12\x0b\n\x03sql\x18\x01 \x01(\t\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\x12\x19\n\x11\x61llow_replication\x18\x03 \x01(\x08\x12:\n\rbefore_schema\x18\x04 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\x12\x39\n\x0c\x61\x66ter_schema\x18\x05 
\x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\x8c\x01\n\x13\x41pplySchemaResponse\x12:\n\rbefore_schema\x18\x01 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\x12\x39\n\x0c\x61\x66ter_schema\x18\x02 \x01(\x0b\x32#.tabletmanagerdata.SchemaDefinition\"\x13\n\x11LockTablesRequest\"\x14\n\x12LockTablesResponse\"\x15\n\x13UnlockTablesRequest\"\x16\n\x14UnlockTablesResponse\"|\n\x18\x45xecuteFetchAsDbaRequest\x12\r\n\x05query\x18\x01 \x01(\x0c\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x10\n\x08max_rows\x18\x03 \x01(\x04\x12\x17\n\x0f\x64isable_binlogs\x18\x04 \x01(\x08\x12\x15\n\rreload_schema\x18\x05 \x01(\x08\"?\n\x19\x45xecuteFetchAsDbaResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"h\n\x1d\x45xecuteFetchAsAllPrivsRequest\x12\r\n\x05query\x18\x01 \x01(\x0c\x12\x0f\n\x07\x64\x62_name\x18\x02 \x01(\t\x12\x10\n\x08max_rows\x18\x03 \x01(\x04\x12\x15\n\rreload_schema\x18\x04 \x01(\x08\"D\n\x1e\x45xecuteFetchAsAllPrivsResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\";\n\x18\x45xecuteFetchAsAppRequest\x12\r\n\x05query\x18\x01 \x01(\x0c\x12\x10\n\x08max_rows\x18\x02 \x01(\x04\"?\n\x19\x45xecuteFetchAsAppResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"\x14\n\x12SlaveStatusRequest\">\n\x13SlaveStatusResponse\x12\'\n\x06status\x18\x01 \x01(\x0b\x32\x17.replicationdata.Status\"\x17\n\x15MasterPositionRequest\"*\n\x16MasterPositionResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x12\n\x10StopSlaveRequest\"\x13\n\x11StopSlaveResponse\"A\n\x17StopSlaveMinimumRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x14\n\x0cwait_timeout\x18\x02 \x01(\x03\",\n\x18StopSlaveMinimumResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x13\n\x11StartSlaveRequest\"\x14\n\x12StartSlaveResponse\"E\n\x1bStartSlaveUntilAfterRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x14\n\x0cwait_timeout\x18\x02 
\x01(\x03\"\x1e\n\x1cStartSlaveUntilAfterResponse\"8\n!TabletExternallyReparentedRequest\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"$\n\"TabletExternallyReparentedResponse\" \n\x1eTabletExternallyElectedRequest\"!\n\x1fTabletExternallyElectedResponse\"\x12\n\x10GetSlavesRequest\"\"\n\x11GetSlavesResponse\x12\r\n\x05\x61\x64\x64rs\x18\x01 \x03(\t\"\x19\n\x17ResetReplicationRequest\"\x1a\n\x18ResetReplicationResponse\"(\n\x17VReplicationExecRequest\x12\r\n\x05query\x18\x01 \x01(\t\">\n\x18VReplicationExecResponse\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.query.QueryResult\"=\n\x1dVReplicationWaitForPosRequest\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x10\n\x08position\x18\x02 \x01(\t\" \n\x1eVReplicationWaitForPosResponse\"\x13\n\x11InitMasterRequest\"&\n\x12InitMasterResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x99\x01\n\x1ePopulateReparentJournalRequest\x12\x17\n\x0ftime_created_ns\x18\x01 \x01(\x03\x12\x13\n\x0b\x61\x63tion_name\x18\x02 \x01(\t\x12+\n\x0cmaster_alias\x18\x03 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x1c\n\x14replication_position\x18\x04 \x01(\t\"!\n\x1fPopulateReparentJournalResponse\"p\n\x10InitSlaveRequest\x12%\n\x06parent\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x1c\n\x14replication_position\x18\x02 \x01(\t\x12\x17\n\x0ftime_created_ns\x18\x03 \x01(\x03\"\x13\n\x11InitSlaveResponse\"\x15\n\x13\x44\x65moteMasterRequest\"(\n\x14\x44\x65moteMasterResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x19\n\x17UndoDemoteMasterRequest\"\x1a\n\x18UndoDemoteMasterResponse\"3\n\x1fPromoteSlaveWhenCaughtUpRequest\x12\x10\n\x08position\x18\x01 \x01(\t\"4\n PromoteSlaveWhenCaughtUpResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"\x19\n\x17SlaveWasPromotedRequest\"\x1a\n\x18SlaveWasPromotedResponse\"m\n\x10SetMasterRequest\x12%\n\x06parent\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\x12\x17\n\x0ftime_created_ns\x18\x02 \x01(\x03\x12\x19\n\x11\x66orce_start_slave\x18\x03 
\x01(\x08\"\x13\n\x11SetMasterResponse\"A\n\x18SlaveWasRestartedRequest\x12%\n\x06parent\x18\x01 \x01(\x0b\x32\x15.topodata.TabletAlias\"\x1b\n\x19SlaveWasRestartedResponse\"$\n\"StopReplicationAndGetStatusRequest\"N\n#StopReplicationAndGetStatusResponse\x12\'\n\x06status\x18\x01 \x01(\x0b\x32\x17.replicationdata.Status\"\x15\n\x13PromoteSlaveRequest\"(\n\x14PromoteSlaveResponse\x12\x10\n\x08position\x18\x01 \x01(\t\"9\n\rBackupRequest\x12\x13\n\x0b\x63oncurrency\x18\x01 \x01(\x03\x12\x13\n\x0b\x61llowMaster\x18\x02 \x01(\x08\"/\n\x0e\x42\x61\x63kupResponse\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.logutil.Event\"\x1a\n\x18RestoreFromBackupRequest\":\n\x19RestoreFromBackupResponse\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.logutil.EventB0Z.vitess.io/vitess/go/vt/proto/tabletmanagerdatab\x06proto3')
,
dependencies=[query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,replicationdata__pb2.DESCRIPTOR,logutil__pb2.DESCRIPTOR,])
# Descriptor for the TableDefinition message: per-table metadata (name,
# schema text, column names, primary-key columns, type string, plus
# data_length and row_count statistics).
# NOTE(review): protoc-generated ("DO NOT EDIT") — the field numbers and
# serialized_start/serialized_end byte offsets are derived from the
# serialized_pb blob above; never hand-edit these values.
_TABLEDEFINITION = _descriptor.Descriptor(
  name='TableDefinition',
  full_name='tabletmanagerdata.TableDefinition',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='tabletmanagerdata.TableDefinition.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='schema', full_name='tabletmanagerdata.TableDefinition.schema', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='columns', full_name='tabletmanagerdata.TableDefinition.columns', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='primary_key_columns', full_name='tabletmanagerdata.TableDefinition.primary_key_columns', index=3,
      number=4, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='type', full_name='tabletmanagerdata.TableDefinition.type', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data_length', full_name='tabletmanagerdata.TableDefinition.data_length', index=5,
      number=6, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='row_count', full_name='tabletmanagerdata.TableDefinition.row_count', index=6,
      number=7, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=114,
  serialized_end=261,
)
_SCHEMADEFINITION = _descriptor.Descriptor(
name='SchemaDefinition',
full_name='tabletmanagerdata.SchemaDefinition',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='database_schema', full_name='tabletmanagerdata.SchemaDefinition.database_schema', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='table_definitions', full_name='tabletmanagerdata.SchemaDefinition.table_definitions', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='tabletmanagerdata.SchemaDefinition.version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=263,
serialized_end=386,
)
_SCHEMACHANGERESULT = _descriptor.Descriptor(
name='SchemaChangeResult',
full_name='tabletmanagerdata.SchemaChangeResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='before_schema', full_name='tabletmanagerdata.SchemaChangeResult.before_schema', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='after_schema', full_name='tabletmanagerdata.SchemaChangeResult.after_schema', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=389,
serialized_end=528,
)
_USERPERMISSION_PRIVILEGESENTRY = _descriptor.Descriptor(
name='PrivilegesEntry',
full_name='tabletmanagerdata.UserPermission.PrivilegesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tabletmanagerdata.UserPermission.PrivilegesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tabletmanagerdata.UserPermission.PrivilegesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=675,
serialized_end=724,
)
_USERPERMISSION = _descriptor.Descriptor(
name='UserPermission',
full_name='tabletmanagerdata.UserPermission',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='host', full_name='tabletmanagerdata.UserPermission.host', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user', full_name='tabletmanagerdata.UserPermission.user', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='password_checksum', full_name='tabletmanagerdata.UserPermission.password_checksum', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='privileges', full_name='tabletmanagerdata.UserPermission.privileges', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_USERPERMISSION_PRIVILEGESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=531,
serialized_end=724,
)
_DBPERMISSION_PRIVILEGESENTRY = _descriptor.Descriptor(
name='PrivilegesEntry',
full_name='tabletmanagerdata.DbPermission.PrivilegesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tabletmanagerdata.DbPermission.PrivilegesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tabletmanagerdata.DbPermission.PrivilegesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=675,
serialized_end=724,
)
_DBPERMISSION = _descriptor.Descriptor(
name='DbPermission',
full_name='tabletmanagerdata.DbPermission',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='host', full_name='tabletmanagerdata.DbPermission.host', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='db', full_name='tabletmanagerdata.DbPermission.db', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user', full_name='tabletmanagerdata.DbPermission.user', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='privileges', full_name='tabletmanagerdata.DbPermission.privileges', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DBPERMISSION_PRIVILEGESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=727,
serialized_end=901,
)
_PERMISSIONS = _descriptor.Descriptor(
name='Permissions',
full_name='tabletmanagerdata.Permissions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='user_permissions', full_name='tabletmanagerdata.Permissions.user_permissions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='db_permissions', full_name='tabletmanagerdata.Permissions.db_permissions', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=904,
serialized_end=1035,
)
_PINGREQUEST = _descriptor.Descriptor(
name='PingRequest',
full_name='tabletmanagerdata.PingRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='tabletmanagerdata.PingRequest.payload', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1037,
serialized_end=1067,
)
_PINGRESPONSE = _descriptor.Descriptor(
name='PingResponse',
full_name='tabletmanagerdata.PingResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='tabletmanagerdata.PingResponse.payload', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1069,
serialized_end=1100,
)
_SLEEPREQUEST = _descriptor.Descriptor(
name='SleepRequest',
full_name='tabletmanagerdata.SleepRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='duration', full_name='tabletmanagerdata.SleepRequest.duration', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1102,
serialized_end=1134,
)
# Descriptor for the empty SleepResponse message (no fields).
# NOTE(review): protoc-generated ("DO NOT EDIT") — offsets are tied to
# serialized_pb; never hand-edit.
_SLEEPRESPONSE = _descriptor.Descriptor(
  name='SleepResponse',
  full_name='tabletmanagerdata.SleepResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1136,
  serialized_end=1151,
)
_EXECUTEHOOKREQUEST_EXTRAENVENTRY = _descriptor.Descriptor(
name='ExtraEnvEntry',
full_name='tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1282,
serialized_end=1329,
)
_EXECUTEHOOKREQUEST = _descriptor.Descriptor(
name='ExecuteHookRequest',
full_name='tabletmanagerdata.ExecuteHookRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tabletmanagerdata.ExecuteHookRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parameters', full_name='tabletmanagerdata.ExecuteHookRequest.parameters', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extra_env', full_name='tabletmanagerdata.ExecuteHookRequest.extra_env', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_EXECUTEHOOKREQUEST_EXTRAENVENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1154,
serialized_end=1329,
)
_EXECUTEHOOKRESPONSE = _descriptor.Descriptor(
name='ExecuteHookResponse',
full_name='tabletmanagerdata.ExecuteHookResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='exit_status', full_name='tabletmanagerdata.ExecuteHookResponse.exit_status', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stdout', full_name='tabletmanagerdata.ExecuteHookResponse.stdout', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stderr', full_name='tabletmanagerdata.ExecuteHookResponse.stderr', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1331,
serialized_end=1405,
)
_GETSCHEMAREQUEST = _descriptor.Descriptor(
name='GetSchemaRequest',
full_name='tabletmanagerdata.GetSchemaRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tables', full_name='tabletmanagerdata.GetSchemaRequest.tables', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_views', full_name='tabletmanagerdata.GetSchemaRequest.include_views', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exclude_tables', full_name='tabletmanagerdata.GetSchemaRequest.exclude_tables', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1407,
serialized_end=1488,
)
_GETSCHEMARESPONSE = _descriptor.Descriptor(
name='GetSchemaResponse',
full_name='tabletmanagerdata.GetSchemaResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='schema_definition', full_name='tabletmanagerdata.GetSchemaResponse.schema_definition', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1490,
serialized_end=1573,
)
_GETPERMISSIONSREQUEST = _descriptor.Descriptor(
name='GetPermissionsRequest',
full_name='tabletmanagerdata.GetPermissionsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1575,
serialized_end=1598,
)
_GETPERMISSIONSRESPONSE = _descriptor.Descriptor(
name='GetPermissionsResponse',
full_name='tabletmanagerdata.GetPermissionsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='permissions', full_name='tabletmanagerdata.GetPermissionsResponse.permissions', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1600,
serialized_end=1677,
)
_SETREADONLYREQUEST = _descriptor.Descriptor(
name='SetReadOnlyRequest',
full_name='tabletmanagerdata.SetReadOnlyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1679,
serialized_end=1699,
)
_SETREADONLYRESPONSE = _descriptor.Descriptor(
name='SetReadOnlyResponse',
full_name='tabletmanagerdata.SetReadOnlyResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1701,
serialized_end=1722,
)
_SETREADWRITEREQUEST = _descriptor.Descriptor(
name='SetReadWriteRequest',
full_name='tabletmanagerdata.SetReadWriteRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1724,
serialized_end=1745,
)
_SETREADWRITERESPONSE = _descriptor.Descriptor(
name='SetReadWriteResponse',
full_name='tabletmanagerdata.SetReadWriteResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1747,
serialized_end=1769,
)
_CHANGETYPEREQUEST = _descriptor.Descriptor(
name='ChangeTypeRequest',
full_name='tabletmanagerdata.ChangeTypeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tablet_type', full_name='tabletmanagerdata.ChangeTypeRequest.tablet_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1771,
serialized_end=1833,
)
_CHANGETYPERESPONSE = _descriptor.Descriptor(
name='ChangeTypeResponse',
full_name='tabletmanagerdata.ChangeTypeResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1835,
serialized_end=1855,
)
_REFRESHSTATEREQUEST = _descriptor.Descriptor(
name='RefreshStateRequest',
full_name='tabletmanagerdata.RefreshStateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1857,
serialized_end=1878,
)
_REFRESHSTATERESPONSE = _descriptor.Descriptor(
name='RefreshStateResponse',
full_name='tabletmanagerdata.RefreshStateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1880,
serialized_end=1902,
)
_RUNHEALTHCHECKREQUEST = _descriptor.Descriptor(
name='RunHealthCheckRequest',
full_name='tabletmanagerdata.RunHealthCheckRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1904,
serialized_end=1927,
)
_RUNHEALTHCHECKRESPONSE = _descriptor.Descriptor(
name='RunHealthCheckResponse',
full_name='tabletmanagerdata.RunHealthCheckResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1929,
serialized_end=1953,
)
_IGNOREHEALTHERRORREQUEST = _descriptor.Descriptor(
name='IgnoreHealthErrorRequest',
full_name='tabletmanagerdata.IgnoreHealthErrorRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pattern', full_name='tabletmanagerdata.IgnoreHealthErrorRequest.pattern', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1955,
serialized_end=1998,
)
_IGNOREHEALTHERRORRESPONSE = _descriptor.Descriptor(
name='IgnoreHealthErrorResponse',
full_name='tabletmanagerdata.IgnoreHealthErrorResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2000,
serialized_end=2027,
)
_RELOADSCHEMAREQUEST = _descriptor.Descriptor(
name='ReloadSchemaRequest',
full_name='tabletmanagerdata.ReloadSchemaRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='wait_position', full_name='tabletmanagerdata.ReloadSchemaRequest.wait_position', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2029,
serialized_end=2073,
)
_RELOADSCHEMARESPONSE = _descriptor.Descriptor(
name='ReloadSchemaResponse',
full_name='tabletmanagerdata.ReloadSchemaResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2075,
serialized_end=2097,
)
_PREFLIGHTSCHEMAREQUEST = _descriptor.Descriptor(
name='PreflightSchemaRequest',
full_name='tabletmanagerdata.PreflightSchemaRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='changes', full_name='tabletmanagerdata.PreflightSchemaRequest.changes', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2099,
serialized_end=2140,
)
_PREFLIGHTSCHEMARESPONSE = _descriptor.Descriptor(
name='PreflightSchemaResponse',
full_name='tabletmanagerdata.PreflightSchemaResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='change_results', full_name='tabletmanagerdata.PreflightSchemaResponse.change_results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2142,
serialized_end=2230,
)
_APPLYSCHEMAREQUEST = _descriptor.Descriptor(
name='ApplySchemaRequest',
full_name='tabletmanagerdata.ApplySchemaRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sql', full_name='tabletmanagerdata.ApplySchemaRequest.sql', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force', full_name='tabletmanagerdata.ApplySchemaRequest.force', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_replication', full_name='tabletmanagerdata.ApplySchemaRequest.allow_replication', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='before_schema', full_name='tabletmanagerdata.ApplySchemaRequest.before_schema', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='after_schema', full_name='tabletmanagerdata.ApplySchemaRequest.after_schema', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2233,
serialized_end=2427,
)
_APPLYSCHEMARESPONSE = _descriptor.Descriptor(
name='ApplySchemaResponse',
full_name='tabletmanagerdata.ApplySchemaResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='before_schema', full_name='tabletmanagerdata.ApplySchemaResponse.before_schema', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='after_schema', full_name='tabletmanagerdata.ApplySchemaResponse.after_schema', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2430,
serialized_end=2570,
)
_LOCKTABLESREQUEST = _descriptor.Descriptor(
name='LockTablesRequest',
full_name='tabletmanagerdata.LockTablesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2572,
serialized_end=2591,
)
_LOCKTABLESRESPONSE = _descriptor.Descriptor(
name='LockTablesResponse',
full_name='tabletmanagerdata.LockTablesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2593,
serialized_end=2613,
)
_UNLOCKTABLESREQUEST = _descriptor.Descriptor(
name='UnlockTablesRequest',
full_name='tabletmanagerdata.UnlockTablesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2615,
serialized_end=2636,
)
_UNLOCKTABLESRESPONSE = _descriptor.Descriptor(
name='UnlockTablesResponse',
full_name='tabletmanagerdata.UnlockTablesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2638,
serialized_end=2660,
)
_EXECUTEFETCHASDBAREQUEST = _descriptor.Descriptor(
name='ExecuteFetchAsDbaRequest',
full_name='tabletmanagerdata.ExecuteFetchAsDbaRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='tabletmanagerdata.ExecuteFetchAsDbaRequest.query', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='db_name', full_name='tabletmanagerdata.ExecuteFetchAsDbaRequest.db_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_rows', full_name='tabletmanagerdata.ExecuteFetchAsDbaRequest.max_rows', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_binlogs', full_name='tabletmanagerdata.ExecuteFetchAsDbaRequest.disable_binlogs', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reload_schema', full_name='tabletmanagerdata.ExecuteFetchAsDbaRequest.reload_schema', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2662,
serialized_end=2786,
)
_EXECUTEFETCHASDBARESPONSE = _descriptor.Descriptor(
name='ExecuteFetchAsDbaResponse',
full_name='tabletmanagerdata.ExecuteFetchAsDbaResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='tabletmanagerdata.ExecuteFetchAsDbaResponse.result', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2788,
serialized_end=2851,
)
_EXECUTEFETCHASALLPRIVSREQUEST = _descriptor.Descriptor(
name='ExecuteFetchAsAllPrivsRequest',
full_name='tabletmanagerdata.ExecuteFetchAsAllPrivsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='tabletmanagerdata.ExecuteFetchAsAllPrivsRequest.query', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='db_name', full_name='tabletmanagerdata.ExecuteFetchAsAllPrivsRequest.db_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_rows', full_name='tabletmanagerdata.ExecuteFetchAsAllPrivsRequest.max_rows', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reload_schema', full_name='tabletmanagerdata.ExecuteFetchAsAllPrivsRequest.reload_schema', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2853,
serialized_end=2957,
)
_EXECUTEFETCHASALLPRIVSRESPONSE = _descriptor.Descriptor(
name='ExecuteFetchAsAllPrivsResponse',
full_name='tabletmanagerdata.ExecuteFetchAsAllPrivsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='tabletmanagerdata.ExecuteFetchAsAllPrivsResponse.result', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2959,
serialized_end=3027,
)
_EXECUTEFETCHASAPPREQUEST = _descriptor.Descriptor(
name='ExecuteFetchAsAppRequest',
full_name='tabletmanagerdata.ExecuteFetchAsAppRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='tabletmanagerdata.ExecuteFetchAsAppRequest.query', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_rows', full_name='tabletmanagerdata.ExecuteFetchAsAppRequest.max_rows', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3029,
serialized_end=3088,
)
_EXECUTEFETCHASAPPRESPONSE = _descriptor.Descriptor(
name='ExecuteFetchAsAppResponse',
full_name='tabletmanagerdata.ExecuteFetchAsAppResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='tabletmanagerdata.ExecuteFetchAsAppResponse.result', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3090,
serialized_end=3153,
)
_SLAVESTATUSREQUEST = _descriptor.Descriptor(
name='SlaveStatusRequest',
full_name='tabletmanagerdata.SlaveStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3155,
serialized_end=3175,
)
_SLAVESTATUSRESPONSE = _descriptor.Descriptor(
name='SlaveStatusResponse',
full_name='tabletmanagerdata.SlaveStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='tabletmanagerdata.SlaveStatusResponse.status', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3177,
serialized_end=3239,
)
_MASTERPOSITIONREQUEST = _descriptor.Descriptor(
name='MasterPositionRequest',
full_name='tabletmanagerdata.MasterPositionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3241,
serialized_end=3264,
)
_MASTERPOSITIONRESPONSE = _descriptor.Descriptor(
name='MasterPositionResponse',
full_name='tabletmanagerdata.MasterPositionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='position', full_name='tabletmanagerdata.MasterPositionResponse.position', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3266,
serialized_end=3308,
)
_STOPSLAVEREQUEST = _descriptor.Descriptor(
name='StopSlaveRequest',
full_name='tabletmanagerdata.StopSlaveRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3310,
serialized_end=3328,
)
_STOPSLAVERESPONSE = _descriptor.Descriptor(
name='StopSlaveResponse',
full_name='tabletmanagerdata.StopSlaveResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3330,
serialized_end=3349,
)
_STOPSLAVEMINIMUMREQUEST = _descriptor.Descriptor(
name='StopSlaveMinimumRequest',
full_name='tabletmanagerdata.StopSlaveMinimumRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='position', full_name='tabletmanagerdata.StopSlaveMinimumRequest.position', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wait_timeout', full_name='tabletmanagerdata.StopSlaveMinimumRequest.wait_timeout', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3351,
serialized_end=3416,
)
_STOPSLAVEMINIMUMRESPONSE = _descriptor.Descriptor(
name='StopSlaveMinimumResponse',
full_name='tabletmanagerdata.StopSlaveMinimumResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='position', full_name='tabletmanagerdata.StopSlaveMinimumResponse.position', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3418,
serialized_end=3462,
)
_STARTSLAVEREQUEST = _descriptor.Descriptor(
name='StartSlaveRequest',
full_name='tabletmanagerdata.StartSlaveRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3464,
serialized_end=3483,
)
_STARTSLAVERESPONSE = _descriptor.Descriptor(
name='StartSlaveResponse',
full_name='tabletmanagerdata.StartSlaveResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3485,
serialized_end=3505,
)
_STARTSLAVEUNTILAFTERREQUEST = _descriptor.Descriptor(
name='StartSlaveUntilAfterRequest',
full_name='tabletmanagerdata.StartSlaveUntilAfterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='position', full_name='tabletmanagerdata.StartSlaveUntilAfterRequest.position', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wait_timeout', full_name='tabletmanagerdata.StartSlaveUntilAfterRequest.wait_timeout', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3507,
serialized_end=3576,
)
_STARTSLAVEUNTILAFTERRESPONSE = _descriptor.Descriptor(
name='StartSlaveUntilAfterResponse',
full_name='tabletmanagerdata.StartSlaveUntilAfterResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3578,
serialized_end=3608,
)
_TABLETEXTERNALLYREPARENTEDREQUEST = _descriptor.Descriptor(
name='TabletExternallyReparentedRequest',
full_name='tabletmanagerdata.TabletExternallyReparentedRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='external_id', full_name='tabletmanagerdata.TabletExternallyReparentedRequest.external_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3610,
serialized_end=3666,
)
_TABLETEXTERNALLYREPARENTEDRESPONSE = _descriptor.Descriptor(
name='TabletExternallyReparentedResponse',
full_name='tabletmanagerdata.TabletExternallyReparentedResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3668,
serialized_end=3704,
)
_TABLETEXTERNALLYELECTEDREQUEST = _descriptor.Descriptor(
name='TabletExternallyElectedRequest',
full_name='tabletmanagerdata.TabletExternallyElectedRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3706,
serialized_end=3738,
)
_TABLETEXTERNALLYELECTEDRESPONSE = _descriptor.Descriptor(
name='TabletExternallyElectedResponse',
full_name='tabletmanagerdata.TabletExternallyElectedResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3740,
serialized_end=3773,
)
_GETSLAVESREQUEST = _descriptor.Descriptor(
name='GetSlavesRequest',
full_name='tabletmanagerdata.GetSlavesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3775,
serialized_end=3793,
)
_GETSLAVESRESPONSE = _descriptor.Descriptor(
name='GetSlavesResponse',
full_name='tabletmanagerdata.GetSlavesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='addrs', full_name='tabletmanagerdata.GetSlavesResponse.addrs', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3795,
serialized_end=3829,
)
_RESETREPLICATIONREQUEST = _descriptor.Descriptor(
name='ResetReplicationRequest',
full_name='tabletmanagerdata.ResetReplicationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3831,
serialized_end=3856,
)
_RESETREPLICATIONRESPONSE = _descriptor.Descriptor(
name='ResetReplicationResponse',
full_name='tabletmanagerdata.ResetReplicationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3858,
serialized_end=3884,
)
# ---------------------------------------------------------------------------
# Auto-generated protobuf message descriptors (protoc output for
# tabletmanagerdata.proto). DO NOT hand-edit the values below: the
# serialized_start/serialized_end pairs are byte offsets into the serialized
# FileDescriptorProto, and field number/type/cpp_type codes are wire-format
# constants. Regenerate with protoc instead of editing.
# ---------------------------------------------------------------------------

# VReplicationExecRequest: carries a single 'query' string (proto3 string,
# type=9) to be executed by the VReplication engine.
_VREPLICATIONEXECREQUEST = _descriptor.Descriptor(
  name='VReplicationExecRequest',
  full_name='tabletmanagerdata.VReplicationExecRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='query', full_name='tabletmanagerdata.VReplicationExecRequest.query', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3886,
  serialized_end=3926,
)
# VReplicationExecResponse: wraps a query result message (type=11 = message;
# the concrete message_type is linked to query_pb2.QueryResult further below).
_VREPLICATIONEXECRESPONSE = _descriptor.Descriptor(
  name='VReplicationExecResponse',
  full_name='tabletmanagerdata.VReplicationExecResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='tabletmanagerdata.VReplicationExecResponse.result', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3928,
  serialized_end=3990,
)
# VReplicationWaitForPosRequest: stream 'id' (int64, type=3) plus the
# replication 'position' string to wait for.
_VREPLICATIONWAITFORPOSREQUEST = _descriptor.Descriptor(
  name='VReplicationWaitForPosRequest',
  full_name='tabletmanagerdata.VReplicationWaitForPosRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='tabletmanagerdata.VReplicationWaitForPosRequest.id', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='position', full_name='tabletmanagerdata.VReplicationWaitForPosRequest.position', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3992,
  serialized_end=4053,
)
# VReplicationWaitForPosResponse: empty acknowledgement message.
_VREPLICATIONWAITFORPOSRESPONSE = _descriptor.Descriptor(
  name='VReplicationWaitForPosResponse',
  full_name='tabletmanagerdata.VReplicationWaitForPosResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4055,
  serialized_end=4087,
)
# InitMasterRequest: empty request message.
_INITMASTERREQUEST = _descriptor.Descriptor(
  name='InitMasterRequest',
  full_name='tabletmanagerdata.InitMasterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4089,
  serialized_end=4108,
)
# InitMasterResponse: returns the resulting replication 'position' string.
_INITMASTERRESPONSE = _descriptor.Descriptor(
  name='InitMasterResponse',
  full_name='tabletmanagerdata.InitMasterResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='position', full_name='tabletmanagerdata.InitMasterResponse.position', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4110,
  serialized_end=4148,
)
# PopulateReparentJournalRequest: time_created_ns (int64), action_name
# (string), master_alias (message; linked to topodata.TabletAlias below),
# replication_position (string).
_POPULATEREPARENTJOURNALREQUEST = _descriptor.Descriptor(
  name='PopulateReparentJournalRequest',
  full_name='tabletmanagerdata.PopulateReparentJournalRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='time_created_ns', full_name='tabletmanagerdata.PopulateReparentJournalRequest.time_created_ns', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='action_name', full_name='tabletmanagerdata.PopulateReparentJournalRequest.action_name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='master_alias', full_name='tabletmanagerdata.PopulateReparentJournalRequest.master_alias', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='replication_position', full_name='tabletmanagerdata.PopulateReparentJournalRequest.replication_position', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4151,
  serialized_end=4304,
)
# PopulateReparentJournalResponse: empty acknowledgement message.
_POPULATEREPARENTJOURNALRESPONSE = _descriptor.Descriptor(
  name='PopulateReparentJournalResponse',
  full_name='tabletmanagerdata.PopulateReparentJournalResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4306,
  serialized_end=4339,
)
# InitSlaveRequest: parent (TabletAlias message, linked below),
# replication_position (string), time_created_ns (int64).
_INITSLAVEREQUEST = _descriptor.Descriptor(
  name='InitSlaveRequest',
  full_name='tabletmanagerdata.InitSlaveRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='parent', full_name='tabletmanagerdata.InitSlaveRequest.parent', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='replication_position', full_name='tabletmanagerdata.InitSlaveRequest.replication_position', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='time_created_ns', full_name='tabletmanagerdata.InitSlaveRequest.time_created_ns', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4341,
  serialized_end=4453,
)
# InitSlaveResponse: empty acknowledgement message.
_INITSLAVERESPONSE = _descriptor.Descriptor(
  name='InitSlaveResponse',
  full_name='tabletmanagerdata.InitSlaveResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4455,
  serialized_end=4474,
)
# DemoteMasterRequest: empty request message.
_DEMOTEMASTERREQUEST = _descriptor.Descriptor(
  name='DemoteMasterRequest',
  full_name='tabletmanagerdata.DemoteMasterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4476,
  serialized_end=4497,
)
# DemoteMasterResponse: returns the demoted master's 'position' string.
_DEMOTEMASTERRESPONSE = _descriptor.Descriptor(
  name='DemoteMasterResponse',
  full_name='tabletmanagerdata.DemoteMasterResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='position', full_name='tabletmanagerdata.DemoteMasterResponse.position', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4499,
  serialized_end=4539,
)
# UndoDemoteMasterRequest: empty request message.
_UNDODEMOTEMASTERREQUEST = _descriptor.Descriptor(
  name='UndoDemoteMasterRequest',
  full_name='tabletmanagerdata.UndoDemoteMasterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4541,
  serialized_end=4566,
)
# UndoDemoteMasterResponse: empty acknowledgement message.
_UNDODEMOTEMASTERRESPONSE = _descriptor.Descriptor(
  name='UndoDemoteMasterResponse',
  full_name='tabletmanagerdata.UndoDemoteMasterResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4568,
  serialized_end=4594,
)
# PromoteSlaveWhenCaughtUpRequest: target replication 'position' string.
_PROMOTESLAVEWHENCAUGHTUPREQUEST = _descriptor.Descriptor(
  name='PromoteSlaveWhenCaughtUpRequest',
  full_name='tabletmanagerdata.PromoteSlaveWhenCaughtUpRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='position', full_name='tabletmanagerdata.PromoteSlaveWhenCaughtUpRequest.position', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4596,
  serialized_end=4647,
)
# PromoteSlaveWhenCaughtUpResponse: resulting 'position' string.
_PROMOTESLAVEWHENCAUGHTUPRESPONSE = _descriptor.Descriptor(
  name='PromoteSlaveWhenCaughtUpResponse',
  full_name='tabletmanagerdata.PromoteSlaveWhenCaughtUpResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='position', full_name='tabletmanagerdata.PromoteSlaveWhenCaughtUpResponse.position', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4649,
  serialized_end=4701,
)
# SlaveWasPromotedRequest: empty request message.
_SLAVEWASPROMOTEDREQUEST = _descriptor.Descriptor(
  name='SlaveWasPromotedRequest',
  full_name='tabletmanagerdata.SlaveWasPromotedRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4703,
  serialized_end=4728,
)
# SlaveWasPromotedResponse: empty acknowledgement message.
_SLAVEWASPROMOTEDRESPONSE = _descriptor.Descriptor(
  name='SlaveWasPromotedResponse',
  full_name='tabletmanagerdata.SlaveWasPromotedResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4730,
  serialized_end=4756,
)
# SetMasterRequest: parent (TabletAlias message, linked below),
# time_created_ns (int64), force_start_slave (bool, type=8).
_SETMASTERREQUEST = _descriptor.Descriptor(
  name='SetMasterRequest',
  full_name='tabletmanagerdata.SetMasterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='parent', full_name='tabletmanagerdata.SetMasterRequest.parent', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='time_created_ns', full_name='tabletmanagerdata.SetMasterRequest.time_created_ns', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='force_start_slave', full_name='tabletmanagerdata.SetMasterRequest.force_start_slave', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4758,
  serialized_end=4867,
)
# SetMasterResponse: empty acknowledgement message.
_SETMASTERRESPONSE = _descriptor.Descriptor(
  name='SetMasterResponse',
  full_name='tabletmanagerdata.SetMasterResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4869,
  serialized_end=4888,
)
# SlaveWasRestartedRequest: parent (TabletAlias message, linked below).
_SLAVEWASRESTARTEDREQUEST = _descriptor.Descriptor(
  name='SlaveWasRestartedRequest',
  full_name='tabletmanagerdata.SlaveWasRestartedRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='parent', full_name='tabletmanagerdata.SlaveWasRestartedRequest.parent', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4890,
  serialized_end=4955,
)
# SlaveWasRestartedResponse: empty acknowledgement message.
_SLAVEWASRESTARTEDRESPONSE = _descriptor.Descriptor(
  name='SlaveWasRestartedResponse',
  full_name='tabletmanagerdata.SlaveWasRestartedResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4957,
  serialized_end=4984,
)
# StopReplicationAndGetStatusRequest: empty request message.
_STOPREPLICATIONANDGETSTATUSREQUEST = _descriptor.Descriptor(
  name='StopReplicationAndGetStatusRequest',
  full_name='tabletmanagerdata.StopReplicationAndGetStatusRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4986,
  serialized_end=5022,
)
# StopReplicationAndGetStatusResponse: replication status message (linked to
# replicationdata.Status below).
_STOPREPLICATIONANDGETSTATUSRESPONSE = _descriptor.Descriptor(
  name='StopReplicationAndGetStatusResponse',
  full_name='tabletmanagerdata.StopReplicationAndGetStatusResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='tabletmanagerdata.StopReplicationAndGetStatusResponse.status', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5024,
  serialized_end=5102,
)
# PromoteSlaveRequest: empty request message.
_PROMOTESLAVEREQUEST = _descriptor.Descriptor(
  name='PromoteSlaveRequest',
  full_name='tabletmanagerdata.PromoteSlaveRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5104,
  serialized_end=5125,
)
# PromoteSlaveResponse: resulting 'position' string.
_PROMOTESLAVERESPONSE = _descriptor.Descriptor(
  name='PromoteSlaveResponse',
  full_name='tabletmanagerdata.PromoteSlaveResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='position', full_name='tabletmanagerdata.PromoteSlaveResponse.position', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5127,
  serialized_end=5167,
)
# BackupRequest: concurrency (int64) and allowMaster (bool). Note the
# camelCase 'allowMaster' comes straight from the .proto definition.
_BACKUPREQUEST = _descriptor.Descriptor(
  name='BackupRequest',
  full_name='tabletmanagerdata.BackupRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='concurrency', full_name='tabletmanagerdata.BackupRequest.concurrency', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='allowMaster', full_name='tabletmanagerdata.BackupRequest.allowMaster', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5169,
  serialized_end=5226,
)
# BackupResponse: streamed progress 'event' message (linked to
# logutil.Event below).
_BACKUPRESPONSE = _descriptor.Descriptor(
  name='BackupResponse',
  full_name='tabletmanagerdata.BackupResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='event', full_name='tabletmanagerdata.BackupResponse.event', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5228,
  serialized_end=5275,
)
# RestoreFromBackupRequest: empty request message.
_RESTOREFROMBACKUPREQUEST = _descriptor.Descriptor(
  name='RestoreFromBackupRequest',
  full_name='tabletmanagerdata.RestoreFromBackupRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5277,
  serialized_end=5303,
)
# RestoreFromBackupResponse: streamed progress 'event' message (linked to
# logutil.Event below).
_RESTOREFROMBACKUPRESPONSE = _descriptor.Descriptor(
  name='RestoreFromBackupResponse',
  full_name='tabletmanagerdata.RestoreFromBackupResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='event', full_name='tabletmanagerdata.RestoreFromBackupResponse.event', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5305,
  serialized_end=5363,
)
# ---------------------------------------------------------------------------
# Descriptor cross-linking (generated): resolve message/enum field types that
# could not be set inline above. Fields referring to types from imported proto
# files are wired to the corresponding *_pb2 module descriptors
# (topodata, query, replicationdata, logutil); map-entry nested types are
# attached to their containing messages.
# ---------------------------------------------------------------------------
_SCHEMADEFINITION.fields_by_name['table_definitions'].message_type = _TABLEDEFINITION
_SCHEMACHANGERESULT.fields_by_name['before_schema'].message_type = _SCHEMADEFINITION
_SCHEMACHANGERESULT.fields_by_name['after_schema'].message_type = _SCHEMADEFINITION
_USERPERMISSION_PRIVILEGESENTRY.containing_type = _USERPERMISSION
_USERPERMISSION.fields_by_name['privileges'].message_type = _USERPERMISSION_PRIVILEGESENTRY
_DBPERMISSION_PRIVILEGESENTRY.containing_type = _DBPERMISSION
_DBPERMISSION.fields_by_name['privileges'].message_type = _DBPERMISSION_PRIVILEGESENTRY
_PERMISSIONS.fields_by_name['user_permissions'].message_type = _USERPERMISSION
_PERMISSIONS.fields_by_name['db_permissions'].message_type = _DBPERMISSION
_EXECUTEHOOKREQUEST_EXTRAENVENTRY.containing_type = _EXECUTEHOOKREQUEST
_EXECUTEHOOKREQUEST.fields_by_name['extra_env'].message_type = _EXECUTEHOOKREQUEST_EXTRAENVENTRY
_GETSCHEMARESPONSE.fields_by_name['schema_definition'].message_type = _SCHEMADEFINITION
_GETPERMISSIONSRESPONSE.fields_by_name['permissions'].message_type = _PERMISSIONS
_CHANGETYPEREQUEST.fields_by_name['tablet_type'].enum_type = topodata__pb2._TABLETTYPE
_PREFLIGHTSCHEMARESPONSE.fields_by_name['change_results'].message_type = _SCHEMACHANGERESULT
_APPLYSCHEMAREQUEST.fields_by_name['before_schema'].message_type = _SCHEMADEFINITION
_APPLYSCHEMAREQUEST.fields_by_name['after_schema'].message_type = _SCHEMADEFINITION
_APPLYSCHEMARESPONSE.fields_by_name['before_schema'].message_type = _SCHEMADEFINITION
_APPLYSCHEMARESPONSE.fields_by_name['after_schema'].message_type = _SCHEMADEFINITION
_EXECUTEFETCHASDBARESPONSE.fields_by_name['result'].message_type = query__pb2._QUERYRESULT
_EXECUTEFETCHASALLPRIVSRESPONSE.fields_by_name['result'].message_type = query__pb2._QUERYRESULT
_EXECUTEFETCHASAPPRESPONSE.fields_by_name['result'].message_type = query__pb2._QUERYRESULT
_SLAVESTATUSRESPONSE.fields_by_name['status'].message_type = replicationdata__pb2._STATUS
_VREPLICATIONEXECRESPONSE.fields_by_name['result'].message_type = query__pb2._QUERYRESULT
_POPULATEREPARENTJOURNALREQUEST.fields_by_name['master_alias'].message_type = topodata__pb2._TABLETALIAS
_INITSLAVEREQUEST.fields_by_name['parent'].message_type = topodata__pb2._TABLETALIAS
_SETMASTERREQUEST.fields_by_name['parent'].message_type = topodata__pb2._TABLETALIAS
_SLAVEWASRESTARTEDREQUEST.fields_by_name['parent'].message_type = topodata__pb2._TABLETALIAS
_STOPREPLICATIONANDGETSTATUSRESPONSE.fields_by_name['status'].message_type = replicationdata__pb2._STATUS
_BACKUPRESPONSE.fields_by_name['event'].message_type = logutil__pb2._EVENT
_RESTOREFROMBACKUPRESPONSE.fields_by_name['event'].message_type = logutil__pb2._EVENT
# ---------------------------------------------------------------------------
# Register every top-level message descriptor on the file DESCRIPTOR by proto
# name, then add the file to the default symbol database so messages can be
# looked up by full name at runtime. All generated by protoc.
# ---------------------------------------------------------------------------
DESCRIPTOR.message_types_by_name['TableDefinition'] = _TABLEDEFINITION
DESCRIPTOR.message_types_by_name['SchemaDefinition'] = _SCHEMADEFINITION
DESCRIPTOR.message_types_by_name['SchemaChangeResult'] = _SCHEMACHANGERESULT
DESCRIPTOR.message_types_by_name['UserPermission'] = _USERPERMISSION
DESCRIPTOR.message_types_by_name['DbPermission'] = _DBPERMISSION
DESCRIPTOR.message_types_by_name['Permissions'] = _PERMISSIONS
DESCRIPTOR.message_types_by_name['PingRequest'] = _PINGREQUEST
DESCRIPTOR.message_types_by_name['PingResponse'] = _PINGRESPONSE
DESCRIPTOR.message_types_by_name['SleepRequest'] = _SLEEPREQUEST
DESCRIPTOR.message_types_by_name['SleepResponse'] = _SLEEPRESPONSE
DESCRIPTOR.message_types_by_name['ExecuteHookRequest'] = _EXECUTEHOOKREQUEST
DESCRIPTOR.message_types_by_name['ExecuteHookResponse'] = _EXECUTEHOOKRESPONSE
DESCRIPTOR.message_types_by_name['GetSchemaRequest'] = _GETSCHEMAREQUEST
DESCRIPTOR.message_types_by_name['GetSchemaResponse'] = _GETSCHEMARESPONSE
DESCRIPTOR.message_types_by_name['GetPermissionsRequest'] = _GETPERMISSIONSREQUEST
DESCRIPTOR.message_types_by_name['GetPermissionsResponse'] = _GETPERMISSIONSRESPONSE
DESCRIPTOR.message_types_by_name['SetReadOnlyRequest'] = _SETREADONLYREQUEST
DESCRIPTOR.message_types_by_name['SetReadOnlyResponse'] = _SETREADONLYRESPONSE
DESCRIPTOR.message_types_by_name['SetReadWriteRequest'] = _SETREADWRITEREQUEST
DESCRIPTOR.message_types_by_name['SetReadWriteResponse'] = _SETREADWRITERESPONSE
DESCRIPTOR.message_types_by_name['ChangeTypeRequest'] = _CHANGETYPEREQUEST
DESCRIPTOR.message_types_by_name['ChangeTypeResponse'] = _CHANGETYPERESPONSE
DESCRIPTOR.message_types_by_name['RefreshStateRequest'] = _REFRESHSTATEREQUEST
DESCRIPTOR.message_types_by_name['RefreshStateResponse'] = _REFRESHSTATERESPONSE
DESCRIPTOR.message_types_by_name['RunHealthCheckRequest'] = _RUNHEALTHCHECKREQUEST
DESCRIPTOR.message_types_by_name['RunHealthCheckResponse'] = _RUNHEALTHCHECKRESPONSE
DESCRIPTOR.message_types_by_name['IgnoreHealthErrorRequest'] = _IGNOREHEALTHERRORREQUEST
DESCRIPTOR.message_types_by_name['IgnoreHealthErrorResponse'] = _IGNOREHEALTHERRORRESPONSE
DESCRIPTOR.message_types_by_name['ReloadSchemaRequest'] = _RELOADSCHEMAREQUEST
DESCRIPTOR.message_types_by_name['ReloadSchemaResponse'] = _RELOADSCHEMARESPONSE
DESCRIPTOR.message_types_by_name['PreflightSchemaRequest'] = _PREFLIGHTSCHEMAREQUEST
DESCRIPTOR.message_types_by_name['PreflightSchemaResponse'] = _PREFLIGHTSCHEMARESPONSE
DESCRIPTOR.message_types_by_name['ApplySchemaRequest'] = _APPLYSCHEMAREQUEST
DESCRIPTOR.message_types_by_name['ApplySchemaResponse'] = _APPLYSCHEMARESPONSE
DESCRIPTOR.message_types_by_name['LockTablesRequest'] = _LOCKTABLESREQUEST
DESCRIPTOR.message_types_by_name['LockTablesResponse'] = _LOCKTABLESRESPONSE
DESCRIPTOR.message_types_by_name['UnlockTablesRequest'] = _UNLOCKTABLESREQUEST
DESCRIPTOR.message_types_by_name['UnlockTablesResponse'] = _UNLOCKTABLESRESPONSE
DESCRIPTOR.message_types_by_name['ExecuteFetchAsDbaRequest'] = _EXECUTEFETCHASDBAREQUEST
DESCRIPTOR.message_types_by_name['ExecuteFetchAsDbaResponse'] = _EXECUTEFETCHASDBARESPONSE
DESCRIPTOR.message_types_by_name['ExecuteFetchAsAllPrivsRequest'] = _EXECUTEFETCHASALLPRIVSREQUEST
DESCRIPTOR.message_types_by_name['ExecuteFetchAsAllPrivsResponse'] = _EXECUTEFETCHASALLPRIVSRESPONSE
DESCRIPTOR.message_types_by_name['ExecuteFetchAsAppRequest'] = _EXECUTEFETCHASAPPREQUEST
DESCRIPTOR.message_types_by_name['ExecuteFetchAsAppResponse'] = _EXECUTEFETCHASAPPRESPONSE
DESCRIPTOR.message_types_by_name['SlaveStatusRequest'] = _SLAVESTATUSREQUEST
DESCRIPTOR.message_types_by_name['SlaveStatusResponse'] = _SLAVESTATUSRESPONSE
DESCRIPTOR.message_types_by_name['MasterPositionRequest'] = _MASTERPOSITIONREQUEST
DESCRIPTOR.message_types_by_name['MasterPositionResponse'] = _MASTERPOSITIONRESPONSE
DESCRIPTOR.message_types_by_name['StopSlaveRequest'] = _STOPSLAVEREQUEST
DESCRIPTOR.message_types_by_name['StopSlaveResponse'] = _STOPSLAVERESPONSE
DESCRIPTOR.message_types_by_name['StopSlaveMinimumRequest'] = _STOPSLAVEMINIMUMREQUEST
DESCRIPTOR.message_types_by_name['StopSlaveMinimumResponse'] = _STOPSLAVEMINIMUMRESPONSE
DESCRIPTOR.message_types_by_name['StartSlaveRequest'] = _STARTSLAVEREQUEST
DESCRIPTOR.message_types_by_name['StartSlaveResponse'] = _STARTSLAVERESPONSE
DESCRIPTOR.message_types_by_name['StartSlaveUntilAfterRequest'] = _STARTSLAVEUNTILAFTERREQUEST
DESCRIPTOR.message_types_by_name['StartSlaveUntilAfterResponse'] = _STARTSLAVEUNTILAFTERRESPONSE
DESCRIPTOR.message_types_by_name['TabletExternallyReparentedRequest'] = _TABLETEXTERNALLYREPARENTEDREQUEST
DESCRIPTOR.message_types_by_name['TabletExternallyReparentedResponse'] = _TABLETEXTERNALLYREPARENTEDRESPONSE
DESCRIPTOR.message_types_by_name['TabletExternallyElectedRequest'] = _TABLETEXTERNALLYELECTEDREQUEST
DESCRIPTOR.message_types_by_name['TabletExternallyElectedResponse'] = _TABLETEXTERNALLYELECTEDRESPONSE
DESCRIPTOR.message_types_by_name['GetSlavesRequest'] = _GETSLAVESREQUEST
DESCRIPTOR.message_types_by_name['GetSlavesResponse'] = _GETSLAVESRESPONSE
DESCRIPTOR.message_types_by_name['ResetReplicationRequest'] = _RESETREPLICATIONREQUEST
DESCRIPTOR.message_types_by_name['ResetReplicationResponse'] = _RESETREPLICATIONRESPONSE
DESCRIPTOR.message_types_by_name['VReplicationExecRequest'] = _VREPLICATIONEXECREQUEST
DESCRIPTOR.message_types_by_name['VReplicationExecResponse'] = _VREPLICATIONEXECRESPONSE
DESCRIPTOR.message_types_by_name['VReplicationWaitForPosRequest'] = _VREPLICATIONWAITFORPOSREQUEST
DESCRIPTOR.message_types_by_name['VReplicationWaitForPosResponse'] = _VREPLICATIONWAITFORPOSRESPONSE
DESCRIPTOR.message_types_by_name['InitMasterRequest'] = _INITMASTERREQUEST
DESCRIPTOR.message_types_by_name['InitMasterResponse'] = _INITMASTERRESPONSE
DESCRIPTOR.message_types_by_name['PopulateReparentJournalRequest'] = _POPULATEREPARENTJOURNALREQUEST
DESCRIPTOR.message_types_by_name['PopulateReparentJournalResponse'] = _POPULATEREPARENTJOURNALRESPONSE
DESCRIPTOR.message_types_by_name['InitSlaveRequest'] = _INITSLAVEREQUEST
DESCRIPTOR.message_types_by_name['InitSlaveResponse'] = _INITSLAVERESPONSE
DESCRIPTOR.message_types_by_name['DemoteMasterRequest'] = _DEMOTEMASTERREQUEST
DESCRIPTOR.message_types_by_name['DemoteMasterResponse'] = _DEMOTEMASTERRESPONSE
DESCRIPTOR.message_types_by_name['UndoDemoteMasterRequest'] = _UNDODEMOTEMASTERREQUEST
DESCRIPTOR.message_types_by_name['UndoDemoteMasterResponse'] = _UNDODEMOTEMASTERRESPONSE
DESCRIPTOR.message_types_by_name['PromoteSlaveWhenCaughtUpRequest'] = _PROMOTESLAVEWHENCAUGHTUPREQUEST
DESCRIPTOR.message_types_by_name['PromoteSlaveWhenCaughtUpResponse'] = _PROMOTESLAVEWHENCAUGHTUPRESPONSE
DESCRIPTOR.message_types_by_name['SlaveWasPromotedRequest'] = _SLAVEWASPROMOTEDREQUEST
DESCRIPTOR.message_types_by_name['SlaveWasPromotedResponse'] = _SLAVEWASPROMOTEDRESPONSE
DESCRIPTOR.message_types_by_name['SetMasterRequest'] = _SETMASTERREQUEST
DESCRIPTOR.message_types_by_name['SetMasterResponse'] = _SETMASTERRESPONSE
DESCRIPTOR.message_types_by_name['SlaveWasRestartedRequest'] = _SLAVEWASRESTARTEDREQUEST
DESCRIPTOR.message_types_by_name['SlaveWasRestartedResponse'] = _SLAVEWASRESTARTEDRESPONSE
DESCRIPTOR.message_types_by_name['StopReplicationAndGetStatusRequest'] = _STOPREPLICATIONANDGETSTATUSREQUEST
DESCRIPTOR.message_types_by_name['StopReplicationAndGetStatusResponse'] = _STOPREPLICATIONANDGETSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['PromoteSlaveRequest'] = _PROMOTESLAVEREQUEST
DESCRIPTOR.message_types_by_name['PromoteSlaveResponse'] = _PROMOTESLAVERESPONSE
DESCRIPTOR.message_types_by_name['BackupRequest'] = _BACKUPREQUEST
DESCRIPTOR.message_types_by_name['BackupResponse'] = _BACKUPRESPONSE
DESCRIPTOR.message_types_by_name['RestoreFromBackupRequest'] = _RESTOREFROMBACKUPREQUEST
DESCRIPTOR.message_types_by_name['RestoreFromBackupResponse'] = _RESTOREFROMBACKUPRESPONSE
# Make the file (and all its message types) resolvable through the default
# symbol database.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# ---------------------------------------------------------------------------
# Synthesize the concrete Python message classes from the descriptors via the
# GeneratedProtocolMessageType metaclass, then register each class with the
# symbol database. Map fields (e.g. UserPermission.privileges) get a nested
# *Entry class generated inside their containing message. All protoc output.
# ---------------------------------------------------------------------------
TableDefinition = _reflection.GeneratedProtocolMessageType('TableDefinition', (_message.Message,), dict(
  DESCRIPTOR = _TABLEDEFINITION,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.TableDefinition)
  ))
_sym_db.RegisterMessage(TableDefinition)
SchemaDefinition = _reflection.GeneratedProtocolMessageType('SchemaDefinition', (_message.Message,), dict(
  DESCRIPTOR = _SCHEMADEFINITION,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.SchemaDefinition)
  ))
_sym_db.RegisterMessage(SchemaDefinition)
SchemaChangeResult = _reflection.GeneratedProtocolMessageType('SchemaChangeResult', (_message.Message,), dict(
  DESCRIPTOR = _SCHEMACHANGERESULT,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.SchemaChangeResult)
  ))
_sym_db.RegisterMessage(SchemaChangeResult)
# UserPermission carries a nested PrivilegesEntry class for its map field.
UserPermission = _reflection.GeneratedProtocolMessageType('UserPermission', (_message.Message,), dict(
  PrivilegesEntry = _reflection.GeneratedProtocolMessageType('PrivilegesEntry', (_message.Message,), dict(
    DESCRIPTOR = _USERPERMISSION_PRIVILEGESENTRY,
    __module__ = 'tabletmanagerdata_pb2'
    # @@protoc_insertion_point(class_scope:tabletmanagerdata.UserPermission.PrivilegesEntry)
    ))
  ,
  DESCRIPTOR = _USERPERMISSION,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.UserPermission)
  ))
_sym_db.RegisterMessage(UserPermission)
_sym_db.RegisterMessage(UserPermission.PrivilegesEntry)
# DbPermission carries a nested PrivilegesEntry class for its map field.
DbPermission = _reflection.GeneratedProtocolMessageType('DbPermission', (_message.Message,), dict(
  PrivilegesEntry = _reflection.GeneratedProtocolMessageType('PrivilegesEntry', (_message.Message,), dict(
    DESCRIPTOR = _DBPERMISSION_PRIVILEGESENTRY,
    __module__ = 'tabletmanagerdata_pb2'
    # @@protoc_insertion_point(class_scope:tabletmanagerdata.DbPermission.PrivilegesEntry)
    ))
  ,
  DESCRIPTOR = _DBPERMISSION,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.DbPermission)
  ))
_sym_db.RegisterMessage(DbPermission)
_sym_db.RegisterMessage(DbPermission.PrivilegesEntry)
Permissions = _reflection.GeneratedProtocolMessageType('Permissions', (_message.Message,), dict(
  DESCRIPTOR = _PERMISSIONS,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.Permissions)
  ))
_sym_db.RegisterMessage(Permissions)
PingRequest = _reflection.GeneratedProtocolMessageType('PingRequest', (_message.Message,), dict(
  DESCRIPTOR = _PINGREQUEST,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.PingRequest)
  ))
_sym_db.RegisterMessage(PingRequest)
PingResponse = _reflection.GeneratedProtocolMessageType('PingResponse', (_message.Message,), dict(
  DESCRIPTOR = _PINGRESPONSE,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.PingResponse)
  ))
_sym_db.RegisterMessage(PingResponse)
SleepRequest = _reflection.GeneratedProtocolMessageType('SleepRequest', (_message.Message,), dict(
  DESCRIPTOR = _SLEEPREQUEST,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.SleepRequest)
  ))
_sym_db.RegisterMessage(SleepRequest)
SleepResponse = _reflection.GeneratedProtocolMessageType('SleepResponse', (_message.Message,), dict(
  DESCRIPTOR = _SLEEPRESPONSE,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.SleepResponse)
  ))
_sym_db.RegisterMessage(SleepResponse)
# ExecuteHookRequest carries a nested ExtraEnvEntry class for its map field.
ExecuteHookRequest = _reflection.GeneratedProtocolMessageType('ExecuteHookRequest', (_message.Message,), dict(
  ExtraEnvEntry = _reflection.GeneratedProtocolMessageType('ExtraEnvEntry', (_message.Message,), dict(
    DESCRIPTOR = _EXECUTEHOOKREQUEST_EXTRAENVENTRY,
    __module__ = 'tabletmanagerdata_pb2'
    # @@protoc_insertion_point(class_scope:tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry)
    ))
  ,
  DESCRIPTOR = _EXECUTEHOOKREQUEST,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.ExecuteHookRequest)
  ))
_sym_db.RegisterMessage(ExecuteHookRequest)
_sym_db.RegisterMessage(ExecuteHookRequest.ExtraEnvEntry)
ExecuteHookResponse = _reflection.GeneratedProtocolMessageType('ExecuteHookResponse', (_message.Message,), dict(
  DESCRIPTOR = _EXECUTEHOOKRESPONSE,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.ExecuteHookResponse)
  ))
_sym_db.RegisterMessage(ExecuteHookResponse)
GetSchemaRequest = _reflection.GeneratedProtocolMessageType('GetSchemaRequest', (_message.Message,), dict(
  DESCRIPTOR = _GETSCHEMAREQUEST,
  __module__ = 'tabletmanagerdata_pb2'
  # @@protoc_insertion_point(class_scope:tabletmanagerdata.GetSchemaRequest)
  ))
_sym_db.RegisterMessage(GetSchemaRequest)
GetSchemaResponse = _reflection.GeneratedProtocolMessageType('GetSchemaResponse', (_message.Message,), dict(
DESCRIPTOR = _GETSCHEMARESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.GetSchemaResponse)
))
_sym_db.RegisterMessage(GetSchemaResponse)
GetPermissionsRequest = _reflection.GeneratedProtocolMessageType('GetPermissionsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETPERMISSIONSREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.GetPermissionsRequest)
))
_sym_db.RegisterMessage(GetPermissionsRequest)
GetPermissionsResponse = _reflection.GeneratedProtocolMessageType('GetPermissionsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETPERMISSIONSRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.GetPermissionsResponse)
))
_sym_db.RegisterMessage(GetPermissionsResponse)
SetReadOnlyRequest = _reflection.GeneratedProtocolMessageType('SetReadOnlyRequest', (_message.Message,), dict(
DESCRIPTOR = _SETREADONLYREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SetReadOnlyRequest)
))
_sym_db.RegisterMessage(SetReadOnlyRequest)
SetReadOnlyResponse = _reflection.GeneratedProtocolMessageType('SetReadOnlyResponse', (_message.Message,), dict(
DESCRIPTOR = _SETREADONLYRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SetReadOnlyResponse)
))
_sym_db.RegisterMessage(SetReadOnlyResponse)
SetReadWriteRequest = _reflection.GeneratedProtocolMessageType('SetReadWriteRequest', (_message.Message,), dict(
DESCRIPTOR = _SETREADWRITEREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SetReadWriteRequest)
))
_sym_db.RegisterMessage(SetReadWriteRequest)
SetReadWriteResponse = _reflection.GeneratedProtocolMessageType('SetReadWriteResponse', (_message.Message,), dict(
DESCRIPTOR = _SETREADWRITERESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SetReadWriteResponse)
))
_sym_db.RegisterMessage(SetReadWriteResponse)
ChangeTypeRequest = _reflection.GeneratedProtocolMessageType('ChangeTypeRequest', (_message.Message,), dict(
DESCRIPTOR = _CHANGETYPEREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ChangeTypeRequest)
))
_sym_db.RegisterMessage(ChangeTypeRequest)
ChangeTypeResponse = _reflection.GeneratedProtocolMessageType('ChangeTypeResponse', (_message.Message,), dict(
DESCRIPTOR = _CHANGETYPERESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ChangeTypeResponse)
))
_sym_db.RegisterMessage(ChangeTypeResponse)
RefreshStateRequest = _reflection.GeneratedProtocolMessageType('RefreshStateRequest', (_message.Message,), dict(
DESCRIPTOR = _REFRESHSTATEREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.RefreshStateRequest)
))
_sym_db.RegisterMessage(RefreshStateRequest)
RefreshStateResponse = _reflection.GeneratedProtocolMessageType('RefreshStateResponse', (_message.Message,), dict(
DESCRIPTOR = _REFRESHSTATERESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.RefreshStateResponse)
))
_sym_db.RegisterMessage(RefreshStateResponse)
RunHealthCheckRequest = _reflection.GeneratedProtocolMessageType('RunHealthCheckRequest', (_message.Message,), dict(
DESCRIPTOR = _RUNHEALTHCHECKREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.RunHealthCheckRequest)
))
_sym_db.RegisterMessage(RunHealthCheckRequest)
RunHealthCheckResponse = _reflection.GeneratedProtocolMessageType('RunHealthCheckResponse', (_message.Message,), dict(
DESCRIPTOR = _RUNHEALTHCHECKRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.RunHealthCheckResponse)
))
_sym_db.RegisterMessage(RunHealthCheckResponse)
IgnoreHealthErrorRequest = _reflection.GeneratedProtocolMessageType('IgnoreHealthErrorRequest', (_message.Message,), dict(
DESCRIPTOR = _IGNOREHEALTHERRORREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.IgnoreHealthErrorRequest)
))
_sym_db.RegisterMessage(IgnoreHealthErrorRequest)
IgnoreHealthErrorResponse = _reflection.GeneratedProtocolMessageType('IgnoreHealthErrorResponse', (_message.Message,), dict(
DESCRIPTOR = _IGNOREHEALTHERRORRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.IgnoreHealthErrorResponse)
))
_sym_db.RegisterMessage(IgnoreHealthErrorResponse)
ReloadSchemaRequest = _reflection.GeneratedProtocolMessageType('ReloadSchemaRequest', (_message.Message,), dict(
DESCRIPTOR = _RELOADSCHEMAREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ReloadSchemaRequest)
))
_sym_db.RegisterMessage(ReloadSchemaRequest)
ReloadSchemaResponse = _reflection.GeneratedProtocolMessageType('ReloadSchemaResponse', (_message.Message,), dict(
DESCRIPTOR = _RELOADSCHEMARESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ReloadSchemaResponse)
))
_sym_db.RegisterMessage(ReloadSchemaResponse)
PreflightSchemaRequest = _reflection.GeneratedProtocolMessageType('PreflightSchemaRequest', (_message.Message,), dict(
DESCRIPTOR = _PREFLIGHTSCHEMAREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.PreflightSchemaRequest)
))
_sym_db.RegisterMessage(PreflightSchemaRequest)
PreflightSchemaResponse = _reflection.GeneratedProtocolMessageType('PreflightSchemaResponse', (_message.Message,), dict(
DESCRIPTOR = _PREFLIGHTSCHEMARESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.PreflightSchemaResponse)
))
_sym_db.RegisterMessage(PreflightSchemaResponse)
ApplySchemaRequest = _reflection.GeneratedProtocolMessageType('ApplySchemaRequest', (_message.Message,), dict(
DESCRIPTOR = _APPLYSCHEMAREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ApplySchemaRequest)
))
_sym_db.RegisterMessage(ApplySchemaRequest)
ApplySchemaResponse = _reflection.GeneratedProtocolMessageType('ApplySchemaResponse', (_message.Message,), dict(
DESCRIPTOR = _APPLYSCHEMARESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ApplySchemaResponse)
))
_sym_db.RegisterMessage(ApplySchemaResponse)
LockTablesRequest = _reflection.GeneratedProtocolMessageType('LockTablesRequest', (_message.Message,), dict(
DESCRIPTOR = _LOCKTABLESREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.LockTablesRequest)
))
_sym_db.RegisterMessage(LockTablesRequest)
LockTablesResponse = _reflection.GeneratedProtocolMessageType('LockTablesResponse', (_message.Message,), dict(
DESCRIPTOR = _LOCKTABLESRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.LockTablesResponse)
))
_sym_db.RegisterMessage(LockTablesResponse)
UnlockTablesRequest = _reflection.GeneratedProtocolMessageType('UnlockTablesRequest', (_message.Message,), dict(
DESCRIPTOR = _UNLOCKTABLESREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.UnlockTablesRequest)
))
_sym_db.RegisterMessage(UnlockTablesRequest)
UnlockTablesResponse = _reflection.GeneratedProtocolMessageType('UnlockTablesResponse', (_message.Message,), dict(
DESCRIPTOR = _UNLOCKTABLESRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.UnlockTablesResponse)
))
_sym_db.RegisterMessage(UnlockTablesResponse)
ExecuteFetchAsDbaRequest = _reflection.GeneratedProtocolMessageType('ExecuteFetchAsDbaRequest', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEFETCHASDBAREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ExecuteFetchAsDbaRequest)
))
_sym_db.RegisterMessage(ExecuteFetchAsDbaRequest)
ExecuteFetchAsDbaResponse = _reflection.GeneratedProtocolMessageType('ExecuteFetchAsDbaResponse', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEFETCHASDBARESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ExecuteFetchAsDbaResponse)
))
_sym_db.RegisterMessage(ExecuteFetchAsDbaResponse)
ExecuteFetchAsAllPrivsRequest = _reflection.GeneratedProtocolMessageType('ExecuteFetchAsAllPrivsRequest', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEFETCHASALLPRIVSREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ExecuteFetchAsAllPrivsRequest)
))
_sym_db.RegisterMessage(ExecuteFetchAsAllPrivsRequest)
ExecuteFetchAsAllPrivsResponse = _reflection.GeneratedProtocolMessageType('ExecuteFetchAsAllPrivsResponse', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEFETCHASALLPRIVSRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ExecuteFetchAsAllPrivsResponse)
))
_sym_db.RegisterMessage(ExecuteFetchAsAllPrivsResponse)
ExecuteFetchAsAppRequest = _reflection.GeneratedProtocolMessageType('ExecuteFetchAsAppRequest', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEFETCHASAPPREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ExecuteFetchAsAppRequest)
))
_sym_db.RegisterMessage(ExecuteFetchAsAppRequest)
ExecuteFetchAsAppResponse = _reflection.GeneratedProtocolMessageType('ExecuteFetchAsAppResponse', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEFETCHASAPPRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ExecuteFetchAsAppResponse)
))
_sym_db.RegisterMessage(ExecuteFetchAsAppResponse)
SlaveStatusRequest = _reflection.GeneratedProtocolMessageType('SlaveStatusRequest', (_message.Message,), dict(
DESCRIPTOR = _SLAVESTATUSREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SlaveStatusRequest)
))
_sym_db.RegisterMessage(SlaveStatusRequest)
SlaveStatusResponse = _reflection.GeneratedProtocolMessageType('SlaveStatusResponse', (_message.Message,), dict(
DESCRIPTOR = _SLAVESTATUSRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SlaveStatusResponse)
))
_sym_db.RegisterMessage(SlaveStatusResponse)
MasterPositionRequest = _reflection.GeneratedProtocolMessageType('MasterPositionRequest', (_message.Message,), dict(
DESCRIPTOR = _MASTERPOSITIONREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.MasterPositionRequest)
))
_sym_db.RegisterMessage(MasterPositionRequest)
MasterPositionResponse = _reflection.GeneratedProtocolMessageType('MasterPositionResponse', (_message.Message,), dict(
DESCRIPTOR = _MASTERPOSITIONRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.MasterPositionResponse)
))
_sym_db.RegisterMessage(MasterPositionResponse)
StopSlaveRequest = _reflection.GeneratedProtocolMessageType('StopSlaveRequest', (_message.Message,), dict(
DESCRIPTOR = _STOPSLAVEREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StopSlaveRequest)
))
_sym_db.RegisterMessage(StopSlaveRequest)
StopSlaveResponse = _reflection.GeneratedProtocolMessageType('StopSlaveResponse', (_message.Message,), dict(
DESCRIPTOR = _STOPSLAVERESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StopSlaveResponse)
))
_sym_db.RegisterMessage(StopSlaveResponse)
StopSlaveMinimumRequest = _reflection.GeneratedProtocolMessageType('StopSlaveMinimumRequest', (_message.Message,), dict(
DESCRIPTOR = _STOPSLAVEMINIMUMREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StopSlaveMinimumRequest)
))
_sym_db.RegisterMessage(StopSlaveMinimumRequest)
StopSlaveMinimumResponse = _reflection.GeneratedProtocolMessageType('StopSlaveMinimumResponse', (_message.Message,), dict(
DESCRIPTOR = _STOPSLAVEMINIMUMRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StopSlaveMinimumResponse)
))
_sym_db.RegisterMessage(StopSlaveMinimumResponse)
StartSlaveRequest = _reflection.GeneratedProtocolMessageType('StartSlaveRequest', (_message.Message,), dict(
DESCRIPTOR = _STARTSLAVEREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StartSlaveRequest)
))
_sym_db.RegisterMessage(StartSlaveRequest)
StartSlaveResponse = _reflection.GeneratedProtocolMessageType('StartSlaveResponse', (_message.Message,), dict(
DESCRIPTOR = _STARTSLAVERESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StartSlaveResponse)
))
_sym_db.RegisterMessage(StartSlaveResponse)
StartSlaveUntilAfterRequest = _reflection.GeneratedProtocolMessageType('StartSlaveUntilAfterRequest', (_message.Message,), dict(
DESCRIPTOR = _STARTSLAVEUNTILAFTERREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StartSlaveUntilAfterRequest)
))
_sym_db.RegisterMessage(StartSlaveUntilAfterRequest)
StartSlaveUntilAfterResponse = _reflection.GeneratedProtocolMessageType('StartSlaveUntilAfterResponse', (_message.Message,), dict(
DESCRIPTOR = _STARTSLAVEUNTILAFTERRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StartSlaveUntilAfterResponse)
))
_sym_db.RegisterMessage(StartSlaveUntilAfterResponse)
TabletExternallyReparentedRequest = _reflection.GeneratedProtocolMessageType('TabletExternallyReparentedRequest', (_message.Message,), dict(
DESCRIPTOR = _TABLETEXTERNALLYREPARENTEDREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.TabletExternallyReparentedRequest)
))
_sym_db.RegisterMessage(TabletExternallyReparentedRequest)
TabletExternallyReparentedResponse = _reflection.GeneratedProtocolMessageType('TabletExternallyReparentedResponse', (_message.Message,), dict(
DESCRIPTOR = _TABLETEXTERNALLYREPARENTEDRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.TabletExternallyReparentedResponse)
))
_sym_db.RegisterMessage(TabletExternallyReparentedResponse)
TabletExternallyElectedRequest = _reflection.GeneratedProtocolMessageType('TabletExternallyElectedRequest', (_message.Message,), dict(
DESCRIPTOR = _TABLETEXTERNALLYELECTEDREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.TabletExternallyElectedRequest)
))
_sym_db.RegisterMessage(TabletExternallyElectedRequest)
TabletExternallyElectedResponse = _reflection.GeneratedProtocolMessageType('TabletExternallyElectedResponse', (_message.Message,), dict(
DESCRIPTOR = _TABLETEXTERNALLYELECTEDRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.TabletExternallyElectedResponse)
))
_sym_db.RegisterMessage(TabletExternallyElectedResponse)
GetSlavesRequest = _reflection.GeneratedProtocolMessageType('GetSlavesRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSLAVESREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.GetSlavesRequest)
))
_sym_db.RegisterMessage(GetSlavesRequest)
GetSlavesResponse = _reflection.GeneratedProtocolMessageType('GetSlavesResponse', (_message.Message,), dict(
DESCRIPTOR = _GETSLAVESRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.GetSlavesResponse)
))
_sym_db.RegisterMessage(GetSlavesResponse)
ResetReplicationRequest = _reflection.GeneratedProtocolMessageType('ResetReplicationRequest', (_message.Message,), dict(
DESCRIPTOR = _RESETREPLICATIONREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ResetReplicationRequest)
))
_sym_db.RegisterMessage(ResetReplicationRequest)
ResetReplicationResponse = _reflection.GeneratedProtocolMessageType('ResetReplicationResponse', (_message.Message,), dict(
DESCRIPTOR = _RESETREPLICATIONRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.ResetReplicationResponse)
))
_sym_db.RegisterMessage(ResetReplicationResponse)
VReplicationExecRequest = _reflection.GeneratedProtocolMessageType('VReplicationExecRequest', (_message.Message,), dict(
DESCRIPTOR = _VREPLICATIONEXECREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.VReplicationExecRequest)
))
_sym_db.RegisterMessage(VReplicationExecRequest)
VReplicationExecResponse = _reflection.GeneratedProtocolMessageType('VReplicationExecResponse', (_message.Message,), dict(
DESCRIPTOR = _VREPLICATIONEXECRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.VReplicationExecResponse)
))
_sym_db.RegisterMessage(VReplicationExecResponse)
VReplicationWaitForPosRequest = _reflection.GeneratedProtocolMessageType('VReplicationWaitForPosRequest', (_message.Message,), dict(
DESCRIPTOR = _VREPLICATIONWAITFORPOSREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.VReplicationWaitForPosRequest)
))
_sym_db.RegisterMessage(VReplicationWaitForPosRequest)
VReplicationWaitForPosResponse = _reflection.GeneratedProtocolMessageType('VReplicationWaitForPosResponse', (_message.Message,), dict(
DESCRIPTOR = _VREPLICATIONWAITFORPOSRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.VReplicationWaitForPosResponse)
))
_sym_db.RegisterMessage(VReplicationWaitForPosResponse)
InitMasterRequest = _reflection.GeneratedProtocolMessageType('InitMasterRequest', (_message.Message,), dict(
DESCRIPTOR = _INITMASTERREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.InitMasterRequest)
))
_sym_db.RegisterMessage(InitMasterRequest)
InitMasterResponse = _reflection.GeneratedProtocolMessageType('InitMasterResponse', (_message.Message,), dict(
DESCRIPTOR = _INITMASTERRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.InitMasterResponse)
))
_sym_db.RegisterMessage(InitMasterResponse)
PopulateReparentJournalRequest = _reflection.GeneratedProtocolMessageType('PopulateReparentJournalRequest', (_message.Message,), dict(
DESCRIPTOR = _POPULATEREPARENTJOURNALREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.PopulateReparentJournalRequest)
))
_sym_db.RegisterMessage(PopulateReparentJournalRequest)
PopulateReparentJournalResponse = _reflection.GeneratedProtocolMessageType('PopulateReparentJournalResponse', (_message.Message,), dict(
DESCRIPTOR = _POPULATEREPARENTJOURNALRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.PopulateReparentJournalResponse)
))
_sym_db.RegisterMessage(PopulateReparentJournalResponse)
InitSlaveRequest = _reflection.GeneratedProtocolMessageType('InitSlaveRequest', (_message.Message,), dict(
DESCRIPTOR = _INITSLAVEREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.InitSlaveRequest)
))
_sym_db.RegisterMessage(InitSlaveRequest)
InitSlaveResponse = _reflection.GeneratedProtocolMessageType('InitSlaveResponse', (_message.Message,), dict(
DESCRIPTOR = _INITSLAVERESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.InitSlaveResponse)
))
_sym_db.RegisterMessage(InitSlaveResponse)
DemoteMasterRequest = _reflection.GeneratedProtocolMessageType('DemoteMasterRequest', (_message.Message,), dict(
DESCRIPTOR = _DEMOTEMASTERREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.DemoteMasterRequest)
))
_sym_db.RegisterMessage(DemoteMasterRequest)
DemoteMasterResponse = _reflection.GeneratedProtocolMessageType('DemoteMasterResponse', (_message.Message,), dict(
DESCRIPTOR = _DEMOTEMASTERRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.DemoteMasterResponse)
))
_sym_db.RegisterMessage(DemoteMasterResponse)
UndoDemoteMasterRequest = _reflection.GeneratedProtocolMessageType('UndoDemoteMasterRequest', (_message.Message,), dict(
DESCRIPTOR = _UNDODEMOTEMASTERREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.UndoDemoteMasterRequest)
))
_sym_db.RegisterMessage(UndoDemoteMasterRequest)
UndoDemoteMasterResponse = _reflection.GeneratedProtocolMessageType('UndoDemoteMasterResponse', (_message.Message,), dict(
DESCRIPTOR = _UNDODEMOTEMASTERRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.UndoDemoteMasterResponse)
))
_sym_db.RegisterMessage(UndoDemoteMasterResponse)
PromoteSlaveWhenCaughtUpRequest = _reflection.GeneratedProtocolMessageType('PromoteSlaveWhenCaughtUpRequest', (_message.Message,), dict(
DESCRIPTOR = _PROMOTESLAVEWHENCAUGHTUPREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.PromoteSlaveWhenCaughtUpRequest)
))
_sym_db.RegisterMessage(PromoteSlaveWhenCaughtUpRequest)
PromoteSlaveWhenCaughtUpResponse = _reflection.GeneratedProtocolMessageType('PromoteSlaveWhenCaughtUpResponse', (_message.Message,), dict(
DESCRIPTOR = _PROMOTESLAVEWHENCAUGHTUPRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.PromoteSlaveWhenCaughtUpResponse)
))
_sym_db.RegisterMessage(PromoteSlaveWhenCaughtUpResponse)
SlaveWasPromotedRequest = _reflection.GeneratedProtocolMessageType('SlaveWasPromotedRequest', (_message.Message,), dict(
DESCRIPTOR = _SLAVEWASPROMOTEDREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SlaveWasPromotedRequest)
))
_sym_db.RegisterMessage(SlaveWasPromotedRequest)
SlaveWasPromotedResponse = _reflection.GeneratedProtocolMessageType('SlaveWasPromotedResponse', (_message.Message,), dict(
DESCRIPTOR = _SLAVEWASPROMOTEDRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SlaveWasPromotedResponse)
))
_sym_db.RegisterMessage(SlaveWasPromotedResponse)
SetMasterRequest = _reflection.GeneratedProtocolMessageType('SetMasterRequest', (_message.Message,), dict(
DESCRIPTOR = _SETMASTERREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SetMasterRequest)
))
_sym_db.RegisterMessage(SetMasterRequest)
SetMasterResponse = _reflection.GeneratedProtocolMessageType('SetMasterResponse', (_message.Message,), dict(
DESCRIPTOR = _SETMASTERRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SetMasterResponse)
))
_sym_db.RegisterMessage(SetMasterResponse)
SlaveWasRestartedRequest = _reflection.GeneratedProtocolMessageType('SlaveWasRestartedRequest', (_message.Message,), dict(
DESCRIPTOR = _SLAVEWASRESTARTEDREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SlaveWasRestartedRequest)
))
_sym_db.RegisterMessage(SlaveWasRestartedRequest)
SlaveWasRestartedResponse = _reflection.GeneratedProtocolMessageType('SlaveWasRestartedResponse', (_message.Message,), dict(
DESCRIPTOR = _SLAVEWASRESTARTEDRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.SlaveWasRestartedResponse)
))
_sym_db.RegisterMessage(SlaveWasRestartedResponse)
StopReplicationAndGetStatusRequest = _reflection.GeneratedProtocolMessageType('StopReplicationAndGetStatusRequest', (_message.Message,), dict(
DESCRIPTOR = _STOPREPLICATIONANDGETSTATUSREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StopReplicationAndGetStatusRequest)
))
_sym_db.RegisterMessage(StopReplicationAndGetStatusRequest)
StopReplicationAndGetStatusResponse = _reflection.GeneratedProtocolMessageType('StopReplicationAndGetStatusResponse', (_message.Message,), dict(
DESCRIPTOR = _STOPREPLICATIONANDGETSTATUSRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.StopReplicationAndGetStatusResponse)
))
_sym_db.RegisterMessage(StopReplicationAndGetStatusResponse)
PromoteSlaveRequest = _reflection.GeneratedProtocolMessageType('PromoteSlaveRequest', (_message.Message,), dict(
DESCRIPTOR = _PROMOTESLAVEREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.PromoteSlaveRequest)
))
_sym_db.RegisterMessage(PromoteSlaveRequest)
PromoteSlaveResponse = _reflection.GeneratedProtocolMessageType('PromoteSlaveResponse', (_message.Message,), dict(
DESCRIPTOR = _PROMOTESLAVERESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.PromoteSlaveResponse)
))
_sym_db.RegisterMessage(PromoteSlaveResponse)
BackupRequest = _reflection.GeneratedProtocolMessageType('BackupRequest', (_message.Message,), dict(
DESCRIPTOR = _BACKUPREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.BackupRequest)
))
_sym_db.RegisterMessage(BackupRequest)
BackupResponse = _reflection.GeneratedProtocolMessageType('BackupResponse', (_message.Message,), dict(
DESCRIPTOR = _BACKUPRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.BackupResponse)
))
_sym_db.RegisterMessage(BackupResponse)
RestoreFromBackupRequest = _reflection.GeneratedProtocolMessageType('RestoreFromBackupRequest', (_message.Message,), dict(
DESCRIPTOR = _RESTOREFROMBACKUPREQUEST,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.RestoreFromBackupRequest)
))
_sym_db.RegisterMessage(RestoreFromBackupRequest)
RestoreFromBackupResponse = _reflection.GeneratedProtocolMessageType('RestoreFromBackupResponse', (_message.Message,), dict(
DESCRIPTOR = _RESTOREFROMBACKUPRESPONSE,
__module__ = 'tabletmanagerdata_pb2'
# @@protoc_insertion_point(class_scope:tabletmanagerdata.RestoreFromBackupResponse)
))
_sym_db.RegisterMessage(RestoreFromBackupResponse)
DESCRIPTOR._options = None
_USERPERMISSION_PRIVILEGESENTRY._options = None
_DBPERMISSION_PRIVILEGESENTRY._options = None
_EXECUTEHOOKREQUEST_EXTRAENVENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
drano/django-mailer | refs/heads/master | mailer/engine.py | 3 | import time
import smtplib
import logging
from lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as socket_error
from django.conf import settings
from django.core.mail import send_mail as core_send_mail
from django.db import transaction
# Compatibility shim: Django >= 1.2 exposes get_connection(); older
# releases only provide SMTPConnection, so emulate the newer interface.
try:
    # Django 1.2
    from django.core.mail import get_connection
except ImportError:
    # ImportError: cannot import name get_connection
    # Pre-1.2 fallback: wrap SMTPConnection behind the same callable
    # signature. NOTE: the `backend` argument is accepted but ignored,
    # since old SMTPConnection supports no alternative backends.
    from django.core.mail import SMTPConnection
    get_connection = lambda backend=None, fail_silently=False, **kwds: SMTPConnection(fail_silently=fail_silently)
from mailer.models import Message, DontSendEntry, MessageLog
# when queue is empty, how long to wait (in seconds) before checking again
# (override with settings.MAILER_EMPTY_QUEUE_SLEEP)
EMPTY_QUEUE_SLEEP = getattr(settings, "MAILER_EMPTY_QUEUE_SLEEP", 30)

# lock timeout value. how long to wait for the lock to become available.
# default behavior is to never wait for the lock to be available.
# (value is handed straight to FileLock.acquire(); presumably a negative
# timeout makes it fail immediately when already held -- TODO confirm
# against the installed lockfile version)
LOCK_WAIT_TIMEOUT = getattr(settings, "MAILER_LOCK_WAIT_TIMEOUT", -1)

# The actual backend to use for sending, defaulting to the Django default.
# (override with settings.MAILER_EMAIL_BACKEND)
EMAIL_BACKEND = getattr(settings, "MAILER_EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
def prioritize():
    """
    Yield the messages in the queue in the order they should be sent.

    The queue is re-queried on every iteration, so messages added (or
    deferred) while a batch is being sent are picked up in the right
    order on the next yield.
    """
    non_deferred = Message.objects.non_deferred()
    while True:
        try:
            yield non_deferred.order_by("priority", "when_added")[0]
        except IndexError:
            # index [0] out of range: no non-deferred messages remain
            return
@transaction.commit_on_success
def mark_as_sent(message):
    """
    Mark the given message as sent in the log and delete the original item.

    Runs inside a single transaction so the log entry and the deletion
    commit (or roll back) together.
    """
    RESULT_SENT = 1  # MessageLog result code meaning "sent successfully"
    MessageLog.objects.log(message, RESULT_SENT)
    message.delete()
@transaction.commit_on_success
def mark_as_deferred(message, err=None):
    """
    Mark the given message as deferred in the log and adjust the mail item
    accordingly.

    Runs inside a single transaction so the deferral and the log entry
    commit (or roll back) together.
    """
    RESULT_DEFERRED = 3  # MessageLog result code meaning "deferred"
    message.defer()
    # lazy %-args: the message string is only built if INFO is enabled
    logging.info("message deferred due to failure: %s", err)
    MessageLog.objects.log(message, RESULT_DEFERRED, log_message=str(err))
def send_all():
    """
    Send all eligible messages in the queue.

    A file lock named "send_mail" guarantees at most one sender runs at a
    time; if the lock is already held (or waiting for it times out) this
    function returns without doing anything.  The SMTP connection is
    reused across consecutive messages whose connection kwargs match, and
    dropped after any send failure in case the connection itself broke.
    Failed messages are deferred rather than retried immediately.
    """
    lock = FileLock("send_mail")

    logging.debug("acquiring lock...")
    try:
        lock.acquire(LOCK_WAIT_TIMEOUT)
    except AlreadyLocked:
        logging.debug("lock already in place. quitting.")
        return
    except LockTimeout:
        logging.debug("waiting for the lock timed out. quitting.")
        return
    logging.debug("acquired.")

    start_time = time.time()

    deferred = 0
    sent = 0

    try:
        connection = None
        last_connection_kwargs = None
        for message in prioritize():
            try:
                # Reuse the last connection when the kwargs match - except
                # the password (we assume passwords are the same when the
                # user is the same).
                if connection is None or last_connection_kwargs != message.connection_kwargs:
                    # Connection doesn't exist or doesn't match; build it.
                    if message.connection_kwargs:
                        connection = get_connection(backend=EMAIL_BACKEND, **message.connection_kwargs)
                    else:
                        connection = get_connection(backend=EMAIL_BACKEND)
                    # save the new args - even if they're empty
                    last_connection_kwargs = message.connection_kwargs
                # lazy %-args: only formatted if INFO is enabled
                logging.info("sending message '%s' to %s",
                             message.subject.encode("utf-8"),
                             message.to_addresses.encode("utf-8"))
                email = message.email
                email.connection = connection
                email.send()
                mark_as_sent(message)
                sent += 1
            except (socket_error, smtplib.SMTPSenderRefused,
                    smtplib.SMTPRecipientsRefused,
                    smtplib.SMTPAuthenticationError) as err:
                mark_as_deferred(message, err)
                deferred += 1
                # Get new connection, in case the connection itself has an error.
                connection = None
    finally:
        logging.debug("releasing lock...")
        lock.release()
        logging.debug("released.")

    logging.info("")
    logging.info("%s sent; %s deferred;", sent, deferred)
    logging.info("done in %.2f seconds", time.time() - start_time)
def send_loop():
    """
    Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and
    sending messages if any are on queue.
    """
    while True:
        # Message.objects.all() evaluated for truthiness fetches every
        # queued row just to test emptiness; exists() asks the database
        # the cheap question instead.
        while not Message.objects.all().exists():
            logging.debug("sleeping for %s seconds before checking queue again", EMPTY_QUEUE_SLEEP)
            time.sleep(EMPTY_QUEUE_SLEEP)
        send_all()
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-2.4/Lib/multifile.py | 103 | """A readline()-style interface to the parts of a multipart message.
The MultiFile class makes each part of a multipart message "feel" like
an ordinary file, as long as you use fp.readline(). Allows recursive
use, for nested multipart messages. Probably best used together
with module mimetools.
Suggested use:
real_fp = open(...)
fp = MultiFile(real_fp)
"read some lines from fp"
fp.push(separator)
while 1:
"read lines from fp until it returns an empty string" (A)
if not fp.next(): break
fp.pop()
"read remaining lines from fp until it returns an empty string"
The latter sequence may be used recursively at (A).
It is also allowed to use multiple push()...pop() sequences.
If seekable is given as 0, the class code will not do the bookkeeping
it normally attempts in order to make seeks relative to the beginning of the
current file part. This may be useful when using MultiFile with a non-
seekable stream object.
"""
__all__ = ["MultiFile","Error"]
class Error(Exception):
    # Raised for MultiFile misuse or malformed input: bad seek()/push()/
    # pop() calls, sudden EOF, or a missing end marker.
    pass
class MultiFile:
    """A readline()-style view onto one part of a multipart stream.

    ``push(sep)`` enters a nested part delimited by ``--sep`` /
    ``--sep--`` marker lines; ``readline()`` then returns '' at each
    part boundary, ``next()`` advances to the following part, and
    ``pop()`` leaves the nesting level.
    """
    # Overridden to 1 in __init__ when the underlying stream supports tell/seek.
    seekable = 0
    def __init__(self, fp, seekable=1):
        # fp: underlying file-like object; only readline() is required,
        # plus tell()/seek() when seekable is true.
        self.fp = fp
        self.stack = []
        self.level = 0
        self.last = 0
        if seekable:
            self.seekable = 1
            self.start = self.fp.tell()
            self.posstack = []
    def tell(self):
        """Return the position relative to the start of the current part."""
        if self.level > 0:
            return self.lastpos
        return self.fp.tell() - self.start
    def seek(self, pos, whence=0):
        """Seek within the current part; pos is relative to its start."""
        here = self.tell()
        if whence:
            if whence == 1:
                pos = pos + here
            elif whence == 2:
                if self.level > 0:
                    pos = pos + self.lastpos
                else:
                    raise Error, "can't use whence=2 yet"
        if not 0 <= pos <= here or \
                        self.level > 0 and pos > self.lastpos:
            raise Error, 'bad MultiFile.seek() call'
        self.fp.seek(pos + self.start)
        self.level = 0
        self.last = 0
    def readline(self):
        """Read one line; return '' at a part boundary or at real EOF."""
        if self.level > 0:
            return ''
        line = self.fp.readline()
        # Real EOF?
        if not line:
            self.level = len(self.stack)
            self.last = (self.level > 0)
            if self.last:
                raise Error, 'sudden EOF in MultiFile.readline()'
            return ''
        assert self.level == 0
        # Fast check to see if this is just data
        if self.is_data(line):
            return line
        else:
            # Ignore trailing whitespace on marker lines
            marker = line.rstrip()
        # No? OK, try to match a boundary.
        # Return the line (unstripped) if we don't.
        for i, sep in enumerate(reversed(self.stack)):
            if marker == self.section_divider(sep):
                self.last = 0
                break
            elif marker == self.end_marker(sep):
                self.last = 1
                break
        else:
            return line
        # We only get here if we see a section divider or EOM line
        if self.seekable:
            self.lastpos = self.tell() - len(line)
        self.level = i+1
        if self.level > 1:
            # A marker of an *outer* part means the inner end marker was missing.
            raise Error,'Missing endmarker in MultiFile.readline()'
        return ''
    def readlines(self):
        """Return all remaining lines of the current part as a list."""
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
        return list
    def read(self): # Note: no size argument -- read until EOF only!
        return ''.join(self.readlines())
    def next(self):
        """Skip to the next part; return 0 at the end marker, else 1."""
        while self.readline(): pass
        if self.level > 1 or self.last:
            return 0
        self.level = 0
        self.last = 0
        if self.seekable:
            self.start = self.fp.tell()
        return 1
    def push(self, sep):
        """Enter a nested multipart section delimited by sep."""
        if self.level > 0:
            raise Error, 'bad MultiFile.push() call'
        self.stack.append(sep)
        if self.seekable:
            self.posstack.append(self.start)
            self.start = self.fp.tell()
    def pop(self):
        """Leave the innermost multipart section."""
        if self.stack == []:
            raise Error, 'bad MultiFile.pop() call'
        if self.level <= 1:
            self.last = 0
        else:
            abslastpos = self.lastpos + self.start
        self.level = max(0, self.level - 1)
        self.stack.pop()
        if self.seekable:
            self.start = self.posstack.pop()
            if self.level > 0:
                self.lastpos = abslastpos - self.start
    def is_data(self, line):
        # Cheap test: any line not starting with '--' cannot be a marker.
        return line[:2] != '--'
    def section_divider(self, str):
        return "--" + str
    def end_marker(self, str):
        return "--" + str + "--"
|
UrQA/URQA-Server | refs/heads/master | soma3/soma3/settings_dev.py | 1 | from settings import *
# Development database settings: a MySQL database whose connection details
# are read via urqaDbConfig (brought in by ``from settings import *`` above).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': urqaDbConfig.get_config('DB_NAME'), # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': urqaDbConfig.get_config('DB_USER'),
        'PASSWORD': urqaDbConfig.get_config('DB_PASSWORD'),
        'HOST': urqaDbConfig.get_config('DB_HOST'), # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': urqaDbConfig.get_config('DB_PORT'), # Set to empty string for default.
    }
}
|
hpcuantwerpen/easybuild-easyblocks | refs/heads/develop | easybuild/easyblocks/s/scalapack.py | 2 | ##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ScaLAPACK, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import glob
import os
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.blacs import det_interface # @UnresolvedImport
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.toolchains.linalg.acml import Acml
from easybuild.toolchains.linalg.atlas import Atlas
from easybuild.toolchains.linalg.blacs import Blacs
from easybuild.toolchains.linalg.blis import Blis
from easybuild.toolchains.linalg.flexiblas import FlexiBLAS, det_flexiblas_backend_libs
from easybuild.toolchains.linalg.gotoblas import GotoBLAS
from easybuild.toolchains.linalg.lapack import Lapack
from easybuild.toolchains.linalg.openblas import OpenBLAS
from easybuild.toolchains.linalg.intelmkl import IntelMKL
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import copy_file, remove_file
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_ScaLAPACK(CMakeMake):
    """
    Support for building and installing ScaLAPACK, both versions 1.x and 2.x

    Two build paths exist: CMake (recent versions with CMake as a build
    dependency) and the classic SLmake.inc + make procedure.
    """
    def __init__(self, *args, **kwargs):
        """Constructor of ScaLAPACK easyblock."""
        super(EB_ScaLAPACK, self).__init__(*args, **kwargs)
        self.loosever = LooseVersion(self.version)
        # use CMake for recent versions, but only if CMake is listed as a build dep
        build_deps_names = [dep['name'].lower() for dep in self.cfg.builddependencies()]
        self.use_cmake = self.loosever >= LooseVersion('2.1.0') and 'cmake' in build_deps_names
    def configure_step(self):
        """Configure ScaLAPACK build by copying SLmake.inc.example to SLmake.inc and checking dependencies."""
        # use CMake for recent versions, but only if CMake is listed as a build dep
        if self.use_cmake:
            super(EB_ScaLAPACK, self).configure_step()
        else:
            src = os.path.join(self.cfg['start_dir'], 'SLmake.inc.example')
            dest = os.path.join(self.cfg['start_dir'], 'SLmake.inc')
            # Refuse to clobber an existing SLmake.inc (e.g. from a previous run).
            if os.path.exists(dest):
                raise EasyBuildError("Destination file %s exists", dest)
            else:
                copy_file(src, dest)
    def build_libscalapack_make(self):
        """Build libscalapack using 'make -j', after determining the options to pass to make."""
        # MPI compiler commands
        known_mpi_libs = [toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2] # @UndefinedVariable
        known_mpi_libs += [toolchain.OPENMPI, toolchain.QLOGICMPI] # @UndefinedVariable
        known_mpi_libs += [toolchain.INTELMPI] # @UndefinedVariable
        # Prefer compiler wrappers from the environment; fall back to the
        # conventional wrapper names for known MPI families.
        if os.getenv('MPICC') and os.getenv('MPIF77') and os.getenv('MPIF90'):
            mpicc = os.getenv('MPICC')
            mpif77 = os.getenv('MPIF77')
            mpif90 = os.getenv('MPIF90')
        elif self.toolchain.mpi_family() in known_mpi_libs:
            mpicc = 'mpicc'
            mpif77 = 'mpif77'
            mpif90 = 'mpif90'
        else:
            raise EasyBuildError("Don't know which compiler commands to use.")
        # determine build options BLAS and LAPACK libs
        extra_makeopts = []
        acml = get_software_root(Acml.LAPACK_MODULE_NAME[0])
        flexiblas = get_software_root(FlexiBLAS.LAPACK_MODULE_NAME[0])
        intelmkl = get_software_root(IntelMKL.LAPACK_MODULE_NAME[0])
        lapack = get_software_root(Lapack.LAPACK_MODULE_NAME[0])
        openblas = get_software_root(OpenBLAS.LAPACK_MODULE_NAME[0])
        # Exactly one BLAS/LAPACK provider is expected to be loaded;
        # the branch order below determines precedence if several are.
        if flexiblas:
            libdir = os.path.join(flexiblas, 'lib')
            blas_libs = ' '.join(['-l%s' % lib for lib in FlexiBLAS.BLAS_LIB])
            extra_makeopts.extend([
                'BLASLIB="-L%s %s -lpthread"' % (libdir, blas_libs),
                'LAPACKLIB="-L%s %s"' % (libdir, blas_libs),
            ])
        elif lapack:
            extra_makeopts.append('LAPACKLIB=%s' % os.path.join(lapack, 'lib', 'liblapack.a'))
            # Standalone LAPACK still needs a separate BLAS library.
            for blas in [Atlas, Blis, GotoBLAS]:
                blas_root = get_software_root(blas.BLAS_MODULE_NAME[0])
                if blas_root:
                    blas_libs = ' '.join(['-l%s' % lib for lib in blas.BLAS_LIB])
                    blas_libdir = os.path.join(blas_root, 'lib')
                    extra_makeopts.append('BLASLIB="-L%s %s -lpthread"' % (blas_libdir, blas_libs))
                    break
            if not blas_root:
                raise EasyBuildError("Failed to find a known BLAS library, don't know how to define 'BLASLIB'")
        elif acml:
            acml_base_dir = os.getenv('ACML_BASEDIR', 'NO_ACML_BASEDIR')
            acml_static_lib = os.path.join(acml, acml_base_dir, 'lib', 'libacml.a')
            extra_makeopts.extend([
                'BLASLIB="%s -lpthread"' % acml_static_lib,
                'LAPACKLIB=%s' % acml_static_lib
            ])
        elif openblas:
            libdir = os.path.join(openblas, 'lib')
            blas_libs = ' '.join(['-l%s' % lib for lib in OpenBLAS.BLAS_LIB])
            extra_makeopts.extend([
                'BLASLIB="-L%s %s -lpthread"' % (libdir, blas_libs),
                'LAPACKLIB="-L%s %s"' % (libdir, blas_libs),
            ])
        elif intelmkl:
            libdir = os.path.join(intelmkl, 'mkl', 'lib', 'intel64')
            blas_libs = os.environ['LIBLAPACK']
            extra_makeopts.extend([
                'BLASLIB="-L%s %s -lpthread"' % (libdir, blas_libs),
                'LAPACKLIB="-L%s %s"' % (libdir, blas_libs),
            ])
        else:
            raise EasyBuildError("Unknown LAPACK library used, no idea how to set BLASLIB/LAPACKLIB make options")
        # build procedure changed in v2.0.0
        if self.loosever < LooseVersion('2.0.0'):
            # ScaLAPACK 1.x needs an external BLACS installation.
            blacs = get_software_root(Blacs.BLACS_MODULE_NAME[0])
            if not blacs:
                raise EasyBuildError("BLACS not available, yet required for ScaLAPACK version < 2.0.0")
            # determine interface
            interface = det_interface(self.log, os.path.join(blacs, 'bin'))
            # set build and BLACS dir correctly
            extra_makeopts.append('home=%s BLACSdir=%s' % (self.cfg['start_dir'], blacs))
            # set BLACS libs correctly
            blacs_libs = [
                ('BLACSFINIT', "F77init"),
                ('BLACSCINIT', "Cinit"),
                ('BLACSLIB', "")
            ]
            for (var, lib) in blacs_libs:
                extra_makeopts.append('%s=%s/lib/libblacs%s.a' % (var, blacs, lib))
            # set compilers and options
            noopt = ''
            if self.toolchain.options['noopt']:
                noopt += " -O0"
            if self.toolchain.options['pic']:
                noopt += " -fPIC"
            extra_makeopts += [
                'F77="%s"' % mpif77,
                'CC="%s"' % mpicc,
                'NOOPT="%s"' % noopt,
                'CCFLAGS="-O3 %s"' % os.getenv('CFLAGS')
            ]
            # set interface
            extra_makeopts.append("CDEFS='-D%s -DNO_IEEE $(USEMPI)'" % interface)
        else:
            # determine interface
            if self.toolchain.mpi_family() in known_mpi_libs:
                interface = 'Add_'
            else:
                raise EasyBuildError("Don't know which interface to pick for the MPI library being used.")
            # set compilers and options
            extra_makeopts += [
                'FC="%s"' % mpif90,
                'CC="%s"' % mpicc,
                'CCFLAGS="%s"' % os.getenv('CFLAGS'),
                'FCFLAGS="%s"' % os.getenv('FFLAGS'),
            ]
            # set interface
            extra_makeopts.append('CDEFS="-D%s"' % interface)
        # update make opts, and build_step
        saved_buildopts = self.cfg['buildopts']
        # Only build the library first, that can be done in parallel.
        # Creating libscalapack.a may fail in parallel, but should work
        # fine with non-parallel make afterwards
        self.cfg.update('buildopts', 'lib')
        self.cfg.update('buildopts', ' '.join(extra_makeopts))
        # Copied from ConfigureMake easyblock
        paracmd = ''
        if self.cfg['parallel']:
            paracmd = "-j %s" % self.cfg['parallel']
        cmd = "%s make %s %s" % (self.cfg['prebuildopts'], paracmd, self.cfg['buildopts'])
        # Ignore exit code for parallel run
        (out, _) = run_cmd(cmd, log_ok=False, log_all=False, simple=False)
        # Now prepare to remake libscalapack.a serially and the tests.
        self.cfg['buildopts'] = saved_buildopts
        self.cfg.update('buildopts', ' '.join(extra_makeopts))
        remove_file('libscalapack.a')
        # Force the follow-up build_step to run make without -j.
        self.cfg['parallel'] = 1
    def build_step(self):
        """Build ScaLAPACK using make after setting make options."""
        # only do a parallel pre-build of libscalapack and set up build options if we're not using CMake
        if not self.use_cmake:
            self.build_libscalapack_make()
        super(EB_ScaLAPACK, self).build_step()
    def install_step(self):
        """Install by copying files to install dir."""
        if self.use_cmake:
            super(EB_ScaLAPACK, self).install_step()
        else:
            # 'manually' install ScaLAPACK by copying headers and libraries if we're not using CMake
            path_info = [
                ('SRC', 'include', '.h'), # include files
                ('', 'lib', '.a'), # libraries
            ]
            for (srcdir, destdir, ext) in path_info:
                src = os.path.join(self.cfg['start_dir'], srcdir)
                dest = os.path.join(self.installdir, destdir)
                for lib in glob.glob(os.path.join(src, '*%s' % ext)):
                    copy_file(lib, os.path.join(dest, os.path.basename(lib)))
                    self.log.debug("Copied %s to %s", lib, dest)
    def banned_linked_shared_libs(self):
        """
        List of shared libraries which are not allowed to be linked in any installed binary/library.
        """
        res = super(EB_ScaLAPACK, self).banned_linked_shared_libs()
        # register FlexiBLAS backends as banned libraries,
        # ScaLAPACK should not be linking to those directly
        if get_software_root(FlexiBLAS.LAPACK_MODULE_NAME[0]):
            res.extend(det_flexiblas_backend_libs())
        return res
    def sanity_check_step(self):
        """Custom sanity check for ScaLAPACK."""
        custom_paths = {
            'files': [os.path.join('lib', 'libscalapack.a')],
            'dirs': []
        }
        super(EB_ScaLAPACK, self).sanity_check_step(custom_paths=custom_paths)
|
gradel/mezzanine | refs/heads/master | mezzanine/forms/translation.py | 49 | from modeltranslation.translator import translator, TranslationOptions
from mezzanine.core.translation import TranslatedRichText
from mezzanine.forms.models import Form, Field
class TranslatedForm(TranslatedRichText):
    # Translatable fields of the Form model, on top of those inherited
    # from TranslatedRichText.
    fields = ('button_text', 'response', 'email_subject', 'email_message',)
class TranslatedField(TranslationOptions):
    # Translatable fields of the form Field model.
    fields = ('label', 'choices', 'default', 'placeholder_text', 'help_text',)
# Register the option classes above with django-modeltranslation.
translator.register(Form, TranslatedForm)
translator.register(Field, TranslatedField)
|
vshtanko/scikit-learn | refs/heads/master | sklearn/cluster/tests/test_bicluster.py | 226 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
    # Mock object for testing get_submatrix.
    def __init__(self):
        pass
    def get_indices(self, i):
        # Overridden to reproduce old get_submatrix test.
        # Ignores ``i``: always reports rows {0, 1, 4} and columns {2, 3}.
        return (np.where([True, True, False, False, True])[0],
                np.where([False, False, True, True])[0])
def test_get_submatrix():
    # get_submatrix must return the selected rows/columns (for dense,
    # sparse and plain-list inputs) without aliasing the input data.
    data = np.arange(20).reshape(5, 4)
    model = MockBiclustering()
    for X in (data, csr_matrix(data), data.tolist()):
        submatrix = model.get_submatrix(0, X)
        if issparse(submatrix):
            submatrix = submatrix.toarray()
        assert_array_equal(submatrix, [[2, 3],
                                       [6, 7],
                                       [18, 19]])
        # Mutating the result must not change the original matrix.
        submatrix[:] = -1
        if issparse(X):
            X = X.toarray()
        assert_true(np.all(X != -1))
def _test_shape_indices(model):
    # Check get_shape and get_indices agree on a fitted model: each
    # bicluster's reported shape must match its index-array lengths.
    for cluster_idx in range(model.n_clusters):
        n_rows, n_cols = model.get_shape(cluster_idx)
        row_idx, col_idx = model.get_indices(cluster_idx)
        assert_equal(len(row_idx), n_rows)
        assert_equal(len(col_idx), n_cols)
def test_spectral_coclustering():
    # Test Dhillon's Spectral CoClustering on a simple problem.
    param_grid = {'svd_method': ['randomized', 'arpack'],
                  'n_svd_vecs': [None, 20],
                  'mini_batch': [False, True],
                  'init': ['k-means++'],
                  'n_init': [10],
                  'n_jobs': [1]}
    random_state = 0
    S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
                                    random_state=random_state)
    S -= S.min()  # needs to be nonnegative before making it sparse
    S = np.where(S < 1, 0, S)  # threshold some values
    # Every parameter combination must perfectly recover the planted
    # biclusters, for both dense and sparse input.
    for mat in (S, csr_matrix(S)):
        for kwargs in ParameterGrid(param_grid):
            model = SpectralCoclustering(n_clusters=3,
                                         random_state=random_state,
                                         **kwargs)
            model.fit(mat)
            assert_equal(model.rows_.shape, (3, 30))
            assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
            assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
            assert_equal(consensus_score(model.biclusters_,
                                         (rows, cols)), 1)
            _test_shape_indices(model)
def test_spectral_biclustering():
    # Test Kluger methods on a checkerboard dataset.
    S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
                                      random_state=0)
    non_default_params = {'method': ['scale', 'log'],
                          'svd_method': ['arpack'],
                          'n_svd_vecs': [20],
                          'mini_batch': [True]}
    # Vary one parameter at a time away from the defaults.
    for mat in (S, csr_matrix(S)):
        for param_name, param_values in non_default_params.items():
            for param_value in param_values:
                model = SpectralBiclustering(
                    n_clusters=3,
                    n_init=3,
                    init='k-means++',
                    random_state=0,
                )
                model.set_params(**dict([(param_name, param_value)]))
                if issparse(mat) and model.get_params().get('method') == 'log':
                    # cannot take log of sparse matrix
                    assert_raises(ValueError, model.fit, mat)
                    continue
                else:
                    model.fit(mat)
                # 3x3 checkerboard => 9 biclusters; each row/column
                # belongs to exactly 3 of them.
                assert_equal(model.rows_.shape, (9, 30))
                assert_equal(model.columns_.shape, (9, 30))
                assert_array_equal(model.rows_.sum(axis=0),
                                   np.repeat(3, 30))
                assert_array_equal(model.columns_.sum(axis=0),
                                   np.repeat(3, 30))
                assert_equal(consensus_score(model.biclusters_,
                                             (rows, cols)), 1)
                _test_shape_indices(model)
def _do_scale_test(scaled):
    """Check that rows sum to one constant, and columns to another."""
    row_sum = scaled.sum(axis=1)
    col_sum = scaled.sum(axis=0)
    if issparse(scaled):
        # Sparse sums come back as matrices; flatten to 1-D arrays.
        row_sum = np.asarray(row_sum).squeeze()
        col_sum = np.asarray(col_sum).squeeze()
    assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
                              decimal=1)
    assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
                              decimal=1)
def _do_bistochastic_test(scaled):
    """Check that rows and columns sum to the same constant."""
    _do_scale_test(scaled)
    assert_almost_equal(scaled.sum(axis=0).mean(),
                        scaled.sum(axis=1).mean(),
                        decimal=1)
def test_scale_normalize():
    # _scale_normalize must equalize row sums and column sums, and must
    # preserve sparsity of sparse input.
    generator = np.random.RandomState(0)
    X = generator.rand(100, 100)
    for mat in (X, csr_matrix(X)):
        scaled, _, _ = _scale_normalize(mat)
        _do_scale_test(scaled)
        if issparse(mat):
            assert issparse(scaled)
def test_bistochastic_normalize():
    # _bistochastic_normalize must make row and column sums equal, and
    # must preserve sparsity of sparse input.
    generator = np.random.RandomState(0)
    X = generator.rand(100, 100)
    for mat in (X, csr_matrix(X)):
        scaled = _bistochastic_normalize(mat)
        _do_bistochastic_test(scaled)
        if issparse(mat):
            assert issparse(scaled)
def test_log_normalize():
    # adding any constant to a log-scaled matrix should make it
    # bistochastic
    generator = np.random.RandomState(0)
    mat = generator.rand(100, 100)
    scaled = _log_normalize(mat) + 1
    _do_bistochastic_test(scaled)
def test_fit_best_piecewise():
    # The two piecewise-constant vectors must be ranked ahead of the
    # linearly increasing one.
    model = SpectralBiclustering(random_state=0)
    vectors = np.array([[0, 0, 0, 1, 1, 1],
                        [2, 2, 2, 3, 3, 3],
                        [0, 1, 2, 3, 4, 5]])
    best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
    assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
    # _project_and_cluster must label the two identical row groups as
    # two clusters, for dense and sparse input alike.
    model = SpectralBiclustering(random_state=0)
    data = np.array([[1, 1, 1],
                     [1, 1, 1],
                     [3, 6, 3],
                     [3, 6, 3]])
    vectors = np.array([[1, 0],
                        [0, 1],
                        [0, 0]])
    for mat in (data, csr_matrix(data)):
        # BUG FIX: previously ``data`` was passed here instead of ``mat``,
        # so the sparse input of the loop was never actually exercised.
        labels = model._project_and_cluster(mat, vectors,
                                            n_clusters=2)
        assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
    # Disabled: everything below the raise is currently unreachable.
    raise SkipTest("This test is failing on the buildbot, but cannot"
                   " reproduce. Temporarily disabling it until it can be"
                   " reproduced and fixed.")
    # Noise-free checkerboards (square and both rectangular shapes)
    # should be recovered perfectly.
    model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
    S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
                                      random_state=0)
    model.fit(S)
    assert_equal(consensus_score(model.biclusters_,
                                 (rows, cols)), 1)
    S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
                                      random_state=0)
    model.fit(S)
    assert_equal(consensus_score(model.biclusters_,
                                 (rows, cols)), 1)
    S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
                                      random_state=0)
    model.fit(S)
    assert_equal(consensus_score(model.biclusters_,
                                 (rows, cols)), 1)
def test_errors():
    # Invalid constructor parameters and inputs must raise ValueError at fit time.
    data = np.arange(25).reshape((5, 5))
    model = SpectralBiclustering(n_clusters=(3, 3, 3))
    assert_raises(ValueError, model.fit, data)
    model = SpectralBiclustering(n_clusters='abc')
    assert_raises(ValueError, model.fit, data)
    model = SpectralBiclustering(n_clusters=(3, 'abc'))
    assert_raises(ValueError, model.fit, data)
    model = SpectralBiclustering(method='unknown')
    assert_raises(ValueError, model.fit, data)
    model = SpectralBiclustering(svd_method='unknown')
    assert_raises(ValueError, model.fit, data)
    model = SpectralBiclustering(n_components=0)
    assert_raises(ValueError, model.fit, data)
    model = SpectralBiclustering(n_best=0)
    assert_raises(ValueError, model.fit, data)
    model = SpectralBiclustering(n_components=3, n_best=4)
    assert_raises(ValueError, model.fit, data)
    model = SpectralBiclustering()
    # Non-2D input is also rejected.
    data = np.arange(27).reshape((3, 3, 3))
    assert_raises(ValueError, model.fit, data)
|
Balburdia/adventofcode | refs/heads/master | day04/puzzle1.py | 1 | #! /usr/bin/python3
from itertools import count
from hashlib import md5
password = 'ckczppom'
for i in count(1):
h = md5((password + str(i)).encode()).hexdigest()
if h[:5] == '00000':
print("MD5: {}".format(h))
print("Number: {}".format(i))
break
|
Azure/azure-sdk-for-python | refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline | sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_generated/v2016_10_01/__init__.py | 17 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._key_vault_client import KeyVaultClient
# Public API of this generated package.
__all__ = ['KeyVaultClient']
try:
    # Apply hand-written customizations shipped alongside the generated
    # code; the absence of _patch.py is expected and harmless.
    from ._patch import patch_sdk # type: ignore
    patch_sdk()
except ImportError:
    pass
|
RayMick/scikit-learn | refs/heads/master | examples/linear_model/plot_lasso_model_selection.py | 311 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom, is
derived for large samples (asymptotic results) and assumes the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only of few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In
contrast, coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large amount. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
# Load the diabetes dataset and append 14 uninformative noise features,
# so model selection has something to reject.
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)]  # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1  # fit time, reported in the plot title below
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
    """Plot a fitted LassoLarsIC criterion curve against -log10(alpha)
    and mark the selected alpha with a vertical line."""
    chosen_alpha = model.alpha_
    log_alphas = -np.log10(model.alphas_)
    plt.plot(log_alphas, model.criterion_, '--', color=color,
             linewidth=3, label='%s criterion' % name)
    plt.axvline(-np.log10(chosen_alpha), color=color, linewidth=3,
                label='alpha: %s estimate' % name)
    plt.xlabel('-log(alpha)')
    plt.ylabel('criterion')
# Figure 1: AIC and BIC criterion curves.
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
          % t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
# Figure 2: per-fold MSE along the coordinate-descent path.
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
         label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
            label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
          '(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
# Figure 3: per-fold MSE along the Lars path (same y-limits as figure 2).
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
         label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
            label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
          % t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
|
loop1024/pymo-global | refs/heads/master | android/pgs4a-0.9.6/python-install/lib/python2.7/test/test_doctest2.py | 137 | # -*- coding: utf-8 -*-
u"""A module to test whether doctest recognizes some 2.2 features,
like static and class methods.
>>> print 'yup' # 1
yup
We include some (random) encoded (utf-8) text in the text surrounding
the example. It should be ignored:
ЉЊЈЁЂ
"""
import sys
import unittest
from test import test_support
# Under -OO docstrings are stripped, which would remove every doctest
# below, so the whole module must be skipped.
if sys.flags.optimize >= 2:
    raise unittest.SkipTest("Cannot test docstrings with -O2")
# NOTE: all docstrings below are executable doctests (19 examples,
# counted by test_main) and must not be edited.
class C(object):
    u"""Class C.
    >>> print C() # 2
    42
    We include some (random) encoded (utf-8) text in the text surrounding
    the example. It should be ignored:
    ЉЊЈЁЂ
    """
    def __init__(self):
        """C.__init__.
        >>> print C() # 3
        42
        """
    def __str__(self):
        """
        >>> print C() # 4
        42
        """
        return "42"
    # Nested class: its doctests must be collected too.
    class D(object):
        """A nested D class.
        >>> print "In D!" # 5
        In D!
        """
        def nested(self):
            """
            >>> print 3 # 6
            3
            """
    # Property accessors: x stores the value as-is and reads it negated.
    def getx(self):
        """
        >>> c = C() # 7
        >>> c.x = 12 # 8
        >>> print c.x # 9
        -12
        """
        return -self._x
    def setx(self, value):
        """
        >>> c = C() # 10
        >>> c.x = 12 # 11
        >>> print c.x # 12
        -12
        """
        self._x = value
    # The property's own doc argument also carries doctests.
    x = property(getx, setx, doc="""\
        >>> c = C() # 13
        >>> c.x = 12 # 14
        >>> print c.x # 15
        -12
        """)
    @staticmethod
    def statm():
        """
        A static method.
        >>> print C.statm() # 16
        666
        >>> print C().statm() # 17
        666
        """
        return 666
    @classmethod
    def clsm(cls, val):
        """
        A class method.
        >>> print C.clsm(22) # 18
        22
        >>> print C().clsm(23) # 19
        23
        """
        return val
def test_main():
    # Run this module's doctests through the regression-test harness and
    # verify that exactly the 19 examples tagged #1..#19 above were run.
    from test import test_doctest2
    EXPECTED = 19
    f, t = test_support.run_doctest(test_doctest2)
    if t != EXPECTED:
        raise test_support.TestFailed("expected %d tests to run, not %d" %
                                      (EXPECTED, t))
# Pollute the namespace with a bunch of imported functions and classes,
# to make sure they don't get tested.
from doctest import *
# Allow running the test module directly.
if __name__ == '__main__':
    test_main()
|
popazerty/SDG-e2 | refs/heads/master | lib/python/Tools/GetEcmInfo.py | 11 | import os
import time
# File where the softcam writes details about the last ECM it handled.
ECM_INFO = '/tmp/ecm.info'
# Placeholder used while no ECM data is available.
EMPTY_ECM_INFO = '','0','0','0'
# Module-level cache shared by all GetEcmInfo instances: mtime of the
# last read, parsed key/value pairs, raw file lines, and formatted text.
old_ecm_time = time.time()
info = {}
ecm = ''
data = EMPTY_ECM_INFO
class GetEcmInfo:
    """Polls /tmp/ecm.info and exposes parsed ECM (Entitlement Control
    Message) status for whichever softcam wrote the file -- the parsing
    branches below recognise CCcam ('using'), gbox ('decode'), MGcam
    ('source') and reader-style ('reader') formats.

    NOTE(review): all state lives in module globals (info/ecm/data/
    old_ecm_time), so every instance shares one cache.
    """

    def pollEcmData(self):
        # Re-read ECM_INFO only when its mtime changed since the last poll.
        # Returns True when fresh data was parsed, None otherwise.
        global data
        global old_ecm_time
        global info
        global ecm
        try:
            ecm_time = os.stat(ECM_INFO).st_mtime
        except:
            # File missing/unreadable: deliberately reset the cache to
            # "no ECM" and keep the old timestamp so nothing re-triggers.
            ecm_time = old_ecm_time
            data = EMPTY_ECM_INFO
            info = {}
            ecm = ''
        if ecm_time != old_ecm_time:
            # Shift the poll-interval history one slot: 1 -> 2, 0 -> 1.
            oecmi1 = info.get('ecminterval1', '')
            oecmi0 = info.get('ecminterval0', '')
            info = {}
            info['ecminterval2'] = oecmi1
            info['ecminterval1'] = oecmi0
            old_ecm_time = ecm_time
            try:
                # NOTE(review): file handle is never closed explicitly;
                # relies on refcount collection.
                ecm = open(ECM_INFO, 'rb').readlines()
            except:
                ecm = ''
            # Parse "key: value" lines into the shared info dict.
            for line in ecm:
                d = line.split(':', 1)
                if len(d) > 1:
                    info[d[0].strip()] = d[1].strip()
            data = self.getText()
            return True
        else:
            # No change: record whole seconds elapsed since the last update.
            info['ecminterval0'] = int(time.time() - ecm_time + 0.5)

    def getEcm(self):
        # Returns (changed_flag, raw_ecm_lines).
        return (self.pollEcmData(), ecm)

    def getEcmData(self):
        # Refresh if needed, then return the cached (text, caid, provid, pid).
        self.pollEcmData()
        return data

    def getInfo(self, member, ifempty=''):
        # String value of one parsed ECM field, or ifempty when absent.
        self.pollEcmData()
        return str(info.get(member, ifempty))

    def getText(self):
        """Build (display_text, caid, provid, pid) from the parsed info/ecm
        globals.  Any parsing error falls through to the blanket except at
        the bottom, which returns empty/zero values."""
        global ecm
        try:
            # info is dictionary
            using = info.get('using', '')
            if using:
                # CCcam
                if using == 'fta':
                    self.textvalue = _("FTA")
                elif using == 'emu':
                    self.textvalue = "EMU (%ss)" % (info.get('ecm time', '?'))
                else:
                    # Card/network source: show address, hop count, ecm time.
                    hops = info.get('hops', None)
                    if hops and hops != '0':
                        hops = ' @' + hops
                    else:
                        hops = ''
                    self.textvalue = info.get('address', '?') + hops + " (%ss)" % info.get('ecm time', '?')
            else:
                decode = info.get('decode', None)
                if decode:
                    # gbox (untested)
                    if info['decode'] == 'Network':
                        # Try to resolve the card id to a friendlier line
                        # from /tmp/share.info.
                        cardid = 'id:' + info.get('prov', '')
                        try:
                            share = open('/tmp/share.info', 'rb').readlines()
                            for line in share:
                                if cardid in line:
                                    self.textvalue = line.strip()
                                    break
                            else:
                                # for/else: no matching line found.
                                self.textvalue = cardid
                        except:
                            self.textvalue = decode
                    else:
                        self.textvalue = decode
                    if ecm[1].startswith('SysID'):
                        info['prov'] = ecm[1].strip()[6:]
                    if info['response'] and 'CaID 0x' in ecm[0] and 'pid 0x' in ecm[0]:
                        # Append response time and slice caid/pid out of the
                        # first raw ECM line by substring positions.
                        self.textvalue = self.textvalue + " (0.%ss)" % info['response']
                        info['caid'] = ecm[0][ecm[0].find('CaID 0x') + 7:ecm[0].find(',')]
                        info['pid'] = ecm[0][ecm[0].find('pid 0x') + 6:ecm[0].find(' =')]
                        info['provid'] = info.get('prov', '0')[:4]
                else:
                    source = info.get('source', None)
                    if source:
                        # MGcam: fields carry an "0x"-style 2-char prefix.
                        info['caid'] = info['caid'][2:]
                        info['pid'] = info['pid'][2:]
                        info['provid'] = info['prov'][2:]
                        # NOTE(review): 'time' shadows the imported time
                        # module inside this branch; harmless here since the
                        # module is not used again below, but fragile.
                        time = ""
                        for line in ecm:
                            if 'msec' in line:
                                line = line.split(' ')
                                if line[0]:
                                    time = " (%ss)" % (float(line[0]) / 1000)
                                    continue
                        self.textvalue = source + time
                    else:
                        reader = info.get('reader', '')
                        if reader:
                            hops = info.get('hops', None)
                            if hops and hops != '0':
                                hops = ' @' + hops
                            else:
                                hops = ''
                            self.textvalue = reader + hops + " (%ss)" % info.get('ecm time', '?')
                        else:
                            self.textvalue = ""
            decCI = info.get('caid', '0')
            provid = info.get('provid', '0')
            if provid == '0':
                # Fall back to the raw 'prov' field when no provid was set.
                provid = info.get('prov', '0')
            ecmpid = info.get('pid', '0')
        except:
            # Best-effort: any malformed/missing data yields empty results.
            ecm = ''
            self.textvalue = ""
            decCI = '0'
            provid = '0'
            ecmpid = '0'
        return self.textvalue, decCI, provid, ecmpid
|
badcure/sugarcoat | refs/heads/master | sugarcoat/api/__init__.py | 2 | import flask
from sugarcoat.api.base import app
# Imported for their registration side effects (template filters / routes).
import sugarcoat.api.template_filters
import sugarcoat.rackspacecloud.blueprint.base
import sugarcoat.openweathermap.blueprint.base
import sugarcoat.sunlightfoundation.blueprint.base

# Attach each provider's Flask blueprint to the shared application object.
app.register_blueprint(sugarcoat.rackspacecloud.blueprint.base.app)
app.register_blueprint(sugarcoat.openweathermap.blueprint.base.app)
app.register_blueprint(sugarcoat.sunlightfoundation.blueprint.base.app)
|
Reflexe/doc_to_pdf | refs/heads/master | Windows/program/python-core-3.5.0/lib/unittest/test/test_case.py | 2 | import contextlib
import difflib
import pprint
import pickle
import re
import sys
import logging
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from unittest.test.support import (
TestEquality, TestHashing, LoggingResult, LegacyLoggingResult,
ResultWithNoStartTestRunStopTestRun
)
from test.support import captured_stderr
# Loggers used by this module's logging-related tests; 'foo.bar' is a
# child of 'foo', 'quux' is unrelated to either.
log_foo = logging.getLogger('foo')
log_foobar = logging.getLogger('foo.bar')
log_quux = logging.getLogger('quux')
# Fixture container: every class/method name and every string appended to
# `events` below is asserted verbatim by the test cases, so they are all
# part of the fixture's contract.
class Test(object):
    "Keep these TestCase classes out of the main namespace"

    class Foo(unittest.TestCase):
        def runTest(self): pass
        def test1(self): pass

    class Bar(Foo):
        def test2(self): pass

    class LoggingTestCase(unittest.TestCase):
        """A test case which logs its calls."""

        def __init__(self, events):
            # Always runs the 'test' method; `events` is a caller-owned list
            # that records the fixture's lifecycle calls in order.
            super(Test.LoggingTestCase, self).__init__('test')
            self.events = events

        def setUp(self):
            self.events.append('setUp')

        def test(self):
            self.events.append('test')

        def tearDown(self):
            self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# test that TestCase can be instantiated with no args
# primarily for use at the interactive interpreter
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
def _check_call_order__subtests(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2, 3]:
with self.subTest(i=i):
if i == 1:
self.fail('failure')
for j in [2, 3]:
with self.subTest(j=j):
if i * j == 6:
raise RuntimeError('raised by Foo.test')
1 / 0
# Order is the following:
# i=1 => subtest failure
# i=2, j=2 => subtest success
# i=2, j=3 => subtest error
# i=3, j=2 => subtest error
# i=3, j=3 => subtest success
# toplevel => error
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests(self):
events = []
result = LoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'addSubTestSuccess',
'addSubTestFailure', 'addSubTestFailure',
'addSubTestSuccess', 'addError', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def test_run_call_order__subtests_legacy(self):
# With a legacy result object (without an addSubTest method),
# text execution stops after the first subtest failure.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def _check_call_order__subtests_success(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2]:
with self.subTest(i=i):
for j in [2, 3]:
with self.subTest(j=j):
pass
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests_success(self):
events = []
result = LoggingResult(events)
# The 6 subtest successes are individually recorded, in addition
# to the whole test success.
expected = (['startTest', 'setUp', 'test', 'tearDown']
+ 6 * ['addSubTestSuccess']
+ ['addSuccess', 'stopTest'])
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_success_legacy(self):
# With a legacy result, only the whole test success is recorded.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSuccess', 'stopTest']
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_failfast(self):
events = []
result = LoggingResult(events)
result.failfast = True
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
with self.subTest(i=1):
self.fail('failure')
with self.subTest(i=2):
self.fail('failure')
self.fail('failure')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
def test_subtests_failfast(self):
# Ensure proper test flow with subtests and failfast (issue #22894)
events = []
class Foo(unittest.TestCase):
def test_a(self):
with self.subTest():
events.append('a1')
events.append('a2')
def test_b(self):
with self.subTest():
events.append('b1')
with self.subTest():
self.fail('failure')
events.append('b2')
def test_c(self):
events.append('c')
result = unittest.TestResult()
result.failfast = True
suite = unittest.makeSuite(Foo)
suite.run(result)
expected = ['a1', 'a2', 'b1']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertIs(Foo('test').failureException, AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
returned used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance but since its a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertLess(len(msg), len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
# disable madDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**5
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = 'x' * (2**4)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = 'x' * (2**6)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertEqual_shorten(self):
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 0
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
s = 'x' * 100
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[35 chars]' + 'x' * 61
self.assertEqual(str(cm.exception), "'%sa' != '%sb'" % (c, c))
self.assertEqual(s + 'a', s + 'a')
p = 'y' * 50
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[85 chars]xxxxxxxxxxx'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, p, c, p))
p = 'y' * 100
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[91 chars]xxxxx'
d = 'y' * 40 + '[56 chars]yyyy'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, d, c, d))
def testAssertCountEqual(self):
a = object()
self.assertCountEqual([1, 2, 3], [3, 2, 1])
self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that iterator of unhashable objects can be tested for sameness:
self.assertCountEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# comparing heterogenous non-hashable sequences
self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertCountEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertCountEqual(a, b)
# test utility functions supporting assertCountEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
self.assertEqual(sample_text_error, error)
def testAssertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
self.assertEqual(sample_text_error, error)
def testEqualityBytesWarning(self):
if sys.flags.bytes_warning:
def bytes_warning():
return self.assertWarnsRegex(BytesWarning,
'Comparison between bytes and string')
else:
def bytes_warning():
return contextlib.ExitStack()
with bytes_warning(), self.assertRaises(self.failureException):
self.assertEqual('a', b'a')
with bytes_warning():
self.assertNotEqual('a', b'a')
a = [0, 'a']
b = [0, b'a']
with bytes_warning(), self.assertRaises(self.failureException):
self.assertListEqual(a, b)
with bytes_warning(), self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), tuple(b))
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSequenceEqual(a, tuple(b))
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSequenceEqual(tuple(a), b)
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSequenceEqual('a', b'a')
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSetEqual(set(a), set(b))
with self.assertRaises(self.failureException):
self.assertListEqual(a, tuple(b))
with self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), b)
a = [0, b'a']
b = [0]
with self.assertRaises(self.failureException):
self.assertListEqual(a, b)
with self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(a, tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(tuple(a), b)
with self.assertRaises(self.failureException):
self.assertSetEqual(set(a), set(b))
a = [0]
b = [0, b'a']
with self.assertRaises(self.failureException):
self.assertListEqual(a, b)
with self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(a, tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(tuple(a), b)
with self.assertRaises(self.failureException):
self.assertSetEqual(set(a), set(b))
with bytes_warning(), self.assertRaises(self.failureException):
self.assertDictEqual({'a': 0}, {b'a': 0})
with self.assertRaises(self.failureException):
self.assertDictEqual({}, {b'a': 0})
with self.assertRaises(self.failureException):
self.assertDictEqual({b'a': 0}, {})
with self.assertRaises(self.failureException):
self.assertCountEqual([b'a', b'a'], [b'a', b'a', b'a'])
with bytes_warning():
self.assertCountEqual(['a', b'a'], ['a', b'a'])
with bytes_warning(), self.assertRaises(self.failureException):
self.assertCountEqual(['a', 'a'], [b'a', b'a'])
with bytes_warning(), self.assertRaises(self.failureException):
self.assertCountEqual(['a', 'a', []], [b'a', b'a', []])
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesCallable(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaises(ExceptionMock, Stub)
# A tuple of exception classes is accepted
self.assertRaises((ValueError, ExceptionMock), Stub)
# *args and **kwargs also work
self.assertRaises(ValueError, int, '19', base=8)
# Failure when no exception is raised
with self.assertRaises(self.failureException):
self.assertRaises(ExceptionMock, lambda: 0)
# Failure when the function is None
with self.assertWarns(DeprecationWarning):
self.assertRaises(ExceptionMock, None)
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesContext(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
with self.assertRaises(ExceptionMock):
Stub()
# A tuple of exception classes is accepted
with self.assertRaises((ValueError, ExceptionMock)) as cm:
Stub()
# The context manager exposes caught exception
self.assertIsInstance(cm.exception, ExceptionMock)
self.assertEqual(cm.exception.args[0], 'We expect')
# *args and **kwargs also work
with self.assertRaises(ValueError):
int('19', base=8)
# Failure when no exception is raised
with self.assertRaises(self.failureException):
with self.assertRaises(ExceptionMock):
pass
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertRaises(ExceptionMock, msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertRaises(ExceptionMock, foobar=42):
pass
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertRaises()
with self.assertRaises(TypeError):
self.assertRaises(1)
with self.assertRaises(TypeError):
self.assertRaises(object)
with self.assertRaises(TypeError):
self.assertRaises((ValueError, 1))
with self.assertRaises(TypeError):
self.assertRaises((ValueError, object))
def testAssertRaisesRefcount(self):
# bpo-23890: assertRaises() must not keep objects alive longer
# than expected
def func() :
try:
raise ValueError
except ValueError:
raise ValueError
refcount = sys.getrefcount(func)
self.assertRaises(ValueError, func)
self.assertEqual(refcount, sys.getrefcount(func))
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
with self.assertWarns(DeprecationWarning):
self.assertRaisesRegex(ExceptionMock, 'expect$', None)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertRaisesRegex(Exception, 'expect', msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertRaisesRegex(Exception, 'expect', foobar=42):
pass
def testAssertRaisesRegexInvalidRegex(self):
# Issue 20145.
class MyExc(Exception):
pass
self.assertRaises(TypeError, self.assertRaisesRegex, MyExc, lambda: True)
def testAssertWarnsRegexInvalidRegex(self):
# Issue 20145.
class MyWarn(Warning):
pass
self.assertRaises(TypeError, self.assertWarnsRegex, MyWarn, lambda: True)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertRaisesRegexNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertRaisesRegex()
with self.assertRaises(TypeError):
self.assertRaisesRegex(ValueError)
with self.assertRaises(TypeError):
self.assertRaisesRegex(1, 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex(object, 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex((ValueError, 1), 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex((ValueError, object), 'expect')
def testAssertWarnsCallable(self):
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
# Success when the right warning is triggered, even several times
self.assertWarns(RuntimeWarning, _runtime_warn)
self.assertWarns(RuntimeWarning, _runtime_warn)
# A tuple of warning classes is accepted
self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
# *args and **kwargs also work
self.assertWarns(RuntimeWarning,
warnings.warn, "foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarns(RuntimeWarning, lambda: 0)
# Failure when the function is None
with self.assertWarns(DeprecationWarning):
self.assertWarns(RuntimeWarning, None)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarns(DeprecationWarning, _runtime_warn)
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
self.assertWarns(DeprecationWarning, _runtime_warn)
def testAssertWarnsContext(self):
# Believe it or not, it is preferable to duplicate all tests above,
# to make sure the __warningregistry__ $@ is circumvented correctly.
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarns(RuntimeWarning) as cm:
_runtime_warn()
# A tuple of warning classes is accepted
with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
_runtime_warn()
# The context manager exposes various useful attributes
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foo")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Same with several warnings
with self.assertWarns(RuntimeWarning):
_runtime_warn()
_runtime_warn()
with self.assertWarns(RuntimeWarning):
warnings.warn("foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarns(RuntimeWarning):
pass
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertWarns(RuntimeWarning, msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertWarns(RuntimeWarning, foobar=42):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
def testAssertWarnsNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertWarns()
with self.assertRaises(TypeError):
self.assertWarns(1)
with self.assertRaises(TypeError):
self.assertWarns(object)
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, 1))
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, object))
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, Exception))
def testAssertWarnsRegexCallable(self):
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "foox")
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
lambda: 0)
# Failure when the function is None
with self.assertWarns(DeprecationWarning):
self.assertWarnsRegex(RuntimeWarning, "o+", None)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarnsRegex(DeprecationWarning, "o+",
_runtime_warn, "foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
def testAssertWarnsRegexContext(self):
# Same as above, but with assertWarnsRegex as a context manager
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
_runtime_warn("foox")
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foox")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
pass
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertWarnsRegex(RuntimeWarning, 'o+', msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertWarnsRegex(RuntimeWarning, 'o+', foobar=42):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(DeprecationWarning, "o+"):
_runtime_warn("foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
def testAssertWarnsRegexNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertWarnsRegex()
with self.assertRaises(TypeError):
self.assertWarnsRegex(UserWarning)
with self.assertRaises(TypeError):
self.assertWarnsRegex(1, 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex(object, 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, 1), 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, object), 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, Exception), 'expect')
@contextlib.contextmanager
def assertNoStderr(self):
with captured_stderr() as buf:
yield
self.assertEqual(buf.getvalue(), "")
def assertLogRecords(self, records, matches):
self.assertEqual(len(records), len(matches))
for rec, match in zip(records, matches):
self.assertIsInstance(rec, logging.LogRecord)
for k, v in match.items():
self.assertEqual(getattr(rec, k), v)
def testAssertLogsDefaults(self):
# defaults: root logger, level INFO
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info("1")
log_foobar.debug("2")
self.assertEqual(cm.output, ["INFO:foo:1"])
self.assertLogRecords(cm.records, [{'name': 'foo'}])
def testAssertLogsTwoMatchingMessages(self):
# Same, but with two matching log messages
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info("1")
log_foobar.debug("2")
log_quux.warning("3")
self.assertEqual(cm.output, ["INFO:foo:1", "WARNING:quux:3"])
self.assertLogRecords(cm.records,
[{'name': 'foo'}, {'name': 'quux'}])
def checkAssertLogsPerLevel(self, level):
# Check level filtering
with self.assertNoStderr():
with self.assertLogs(level=level) as cm:
log_foo.warning("1")
log_foobar.error("2")
log_quux.critical("3")
self.assertEqual(cm.output, ["ERROR:foo.bar:2", "CRITICAL:quux:3"])
self.assertLogRecords(cm.records,
[{'name': 'foo.bar'}, {'name': 'quux'}])
def testAssertLogsPerLevel(self):
self.checkAssertLogsPerLevel(logging.ERROR)
self.checkAssertLogsPerLevel('ERROR')
def checkAssertLogsPerLogger(self, logger):
# Check per-logger filtering
with self.assertNoStderr():
with self.assertLogs(level='DEBUG') as outer_cm:
with self.assertLogs(logger, level='DEBUG') as cm:
log_foo.info("1")
log_foobar.debug("2")
log_quux.warning("3")
self.assertEqual(cm.output, ["INFO:foo:1", "DEBUG:foo.bar:2"])
self.assertLogRecords(cm.records,
[{'name': 'foo'}, {'name': 'foo.bar'}])
# The outer catchall caught the quux log
self.assertEqual(outer_cm.output, ["WARNING:quux:3"])
def testAssertLogsPerLogger(self):
self.checkAssertLogsPerLogger(logging.getLogger('foo'))
self.checkAssertLogsPerLogger('foo')
def testAssertLogsFailureNoLogs(self):
# Failure due to no logs
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs():
pass
def testAssertLogsFailureLevelTooHigh(self):
# Failure due to level too high
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs(level='WARNING'):
log_foo.info("1")
def testAssertLogsFailureMismatchingLogger(self):
# Failure due to mismatching logger (and the logged message is
# passed through)
with self.assertLogs('quux', level='ERROR'):
with self.assertRaises(self.failureException):
with self.assertLogs('foo'):
log_quux.error("1")
def testDeprecatedMethodNames(self):
"""
Test that the deprecated methods raise a DeprecationWarning. See #9424.
"""
old = (
(self.failIfEqual, (3, 5)),
(self.assertNotEquals, (3, 5)),
(self.failUnlessEqual, (3, 3)),
(self.assertEquals, (3, 3)),
(self.failUnlessAlmostEqual, (2.0, 2.0)),
(self.assertAlmostEquals, (2.0, 2.0)),
(self.failIfAlmostEqual, (3.0, 5.0)),
(self.assertNotAlmostEquals, (3.0, 5.0)),
(self.failUnless, (True,)),
(self.assert_, (True,)),
(self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
(self.failIf, (False,)),
(self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
(self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
(self.assertRegexpMatches, ('bar', 'bar')),
)
for meth, args in old:
with self.assertWarns(DeprecationWarning):
meth(*args)
# disable this test for now. When the version where the fail* methods will
# be removed is decided, re-enable it and update the version
def _testDeprecatedFailMethods(self):
"""Test that the deprecated fail* methods get removed in 3.x"""
if sys.version_info[:2] < (3, 3):
return
deprecated_names = [
'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
'assertDictContainsSubset',
]
for deprecated_name in deprecated_names:
with self.assertRaises(AttributeError):
getattr(self, deprecated_name) # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
def test_no_exception_leak(self):
# Issue #19880: TestCase.run() should not keep a reference
# to the exception
class MyException(Exception):
ninstance = 0
def __init__(self):
MyException.ninstance += 1
Exception.__init__(self)
def __del__(self):
MyException.ninstance -= 1
class TestCase(unittest.TestCase):
def test1(self):
raise MyException()
@unittest.expectedFailure
def test2(self):
raise MyException()
for method_name in ('test1', 'test2'):
testcase = TestCase(method_name)
testcase.run()
self.assertEqual(MyException.ninstance, 0)
if __name__ == "__main__":
unittest.main()
|
syci/OCB | refs/heads/9.0 | addons/procurement/__openerp__.py | 27 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name' : 'Procurements',
'version' : '1.0',
'website': 'https://www.odoo.com/page/manufacturing',
'category' : 'Hidden/Dependency',
'depends' : ['base', 'product'],
'description': """
This is the module for computing Procurements.
==============================================
This procurement module only depends on the product module and is not useful
on itself. Procurements represent needs that need to be solved by a procurement
rule. When a procurement is created, it is confirmed. When a rule is found,
it will be put in running state. After, it will check if what needed to be done
for the rule has been executed. Then it will go to the done state. A procurement
can also go into exception, for example when it can not find a rule and it can be cancelled.
The mechanism will be extended by several modules. The procurement rule of stock will
create a move and the procurement will be fulfilled when the move is done.
The procurement rule of sale_service will create a task. Those of purchase or
mrp will create a purchase order or a manufacturing order.
The scheduler will check if it can assign a rule to confirmed procurements and if
it can put running procurements to done.
Procurements in exception should be checked manually and can be re-run.
""",
'data': [
'security/ir.model.access.csv',
'security/procurement_security.xml',
'procurement_data.xml',
'wizard/schedulers_all_view.xml',
'procurement_view.xml',
'company_view.xml',
'product_product_view.xml',
],
'demo': [],
'test': ['test/procurement.yml'],
'installable': True,
'auto_install': True,
}
|
Bysmyyr/chromium-crosswalk | refs/heads/master | tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/beanstalk/test_wrapper.py | 114 | import random
import time
from functools import partial
from tests.compat import unittest
from boto.beanstalk.wrapper import Layer1Wrapper
import boto.beanstalk.response as response
class BasicSuite(unittest.TestCase):
def setUp(self):
self.random_id = str(random.randint(1, 1000000))
self.app_name = 'app-' + self.random_id
self.app_version = 'version-' + self.random_id
self.template = 'template-' + self.random_id
self.environment = 'environment-' + self.random_id
self.beanstalk = Layer1Wrapper()
class MiscSuite(BasicSuite):
def test_check_dns_availability(self):
result = self.beanstalk.check_dns_availability('amazon')
self.assertIsInstance(result, response.CheckDNSAvailabilityResponse,
'incorrect response object returned')
self.assertFalse(result.available)
class TestApplicationObjects(BasicSuite):
    """Integration tests for Beanstalk application/version/template calls.

    NOTE(review): these call the live Elastic Beanstalk API through
    Layer1Wrapper -- they are integration tests, not unit tests.
    """

    def create_application(self):
        # This method is used for any API calls that require an application
        # object. This also adds a cleanup step to automatically delete the
        # app when the test is finished. No assertions are performed
        # here. If you want to validate create_application, don't use this
        # method.
        self.beanstalk.create_application(application_name=self.app_name)
        self.addCleanup(partial(self.beanstalk.delete_application,
                                application_name=self.app_name))

    def test_create_delete_application_version(self):
        # This will create an app, create an app version, delete the app
        # version, and delete the app. For each API call we check that the
        # return type is what we expect and that a few attributes have the
        # correct values.
        app_result = self.beanstalk.create_application(application_name=self.app_name)
        self.assertIsInstance(app_result, response.CreateApplicationResponse)
        self.assertEqual(app_result.application.application_name, self.app_name)
        version_result = self.beanstalk.create_application_version(
            application_name=self.app_name, version_label=self.app_version)
        self.assertIsInstance(version_result, response.CreateApplicationVersionResponse)
        self.assertEqual(version_result.application_version.version_label,
                         self.app_version)
        result = self.beanstalk.delete_application_version(
            application_name=self.app_name, version_label=self.app_version)
        self.assertIsInstance(result, response.DeleteApplicationVersionResponse)
        result = self.beanstalk.delete_application(
            application_name=self.app_name
        )
        self.assertIsInstance(result, response.DeleteApplicationResponse)

    def test_create_configuration_template(self):
        # Templates require an existing application; create_application also
        # registers cleanup of the app.
        self.create_application()
        result = self.beanstalk.create_configuration_template(
            application_name=self.app_name, template_name=self.template,
            solution_stack_name='32bit Amazon Linux running Tomcat 6')
        self.assertIsInstance(
            result, response.CreateConfigurationTemplateResponse)
        self.assertEqual(result.solution_stack_name,
                         '32bit Amazon Linux running Tomcat 6')

    def test_create_storage_location(self):
        result = self.beanstalk.create_storage_location()
        self.assertIsInstance(result, response.CreateStorageLocationResponse)

    def test_update_application(self):
        self.create_application()
        result = self.beanstalk.update_application(application_name=self.app_name)
        self.assertIsInstance(result, response.UpdateApplicationResponse)

    def test_update_application_version(self):
        # Updating a version requires both the app and the version to exist.
        self.create_application()
        self.beanstalk.create_application_version(
            application_name=self.app_name, version_label=self.app_version)
        result = self.beanstalk.update_application_version(
            application_name=self.app_name, version_label=self.app_version)
        self.assertIsInstance(
            result, response.UpdateApplicationVersionResponse)
class GetSuite(BasicSuite):
    """Read-only describe/list calls; only response types are validated."""

    def test_describe_applications(self):
        result = self.beanstalk.describe_applications()
        self.assertIsInstance(result, response.DescribeApplicationsResponse)

    def test_describe_application_versions(self):
        result = self.beanstalk.describe_application_versions()
        self.assertIsInstance(result,
                              response.DescribeApplicationVersionsResponse)

    def test_describe_configuration_options(self):
        result = self.beanstalk.describe_configuration_options()
        self.assertIsInstance(result,
                              response.DescribeConfigurationOptionsResponse)

    # NOTE(review): the numeric prefixes below (12, 14, 15) look like
    # leftovers from an ordered suite; the other methods are unnumbered.
    def test_12_describe_environments(self):
        result = self.beanstalk.describe_environments()
        self.assertIsInstance(
            result, response.DescribeEnvironmentsResponse)

    def test_14_describe_events(self):
        result = self.beanstalk.describe_events()
        self.assertIsInstance(result, response.DescribeEventsResponse)

    def test_15_list_available_solution_stacks(self):
        result = self.beanstalk.list_available_solution_stacks()
        self.assertIsInstance(
            result, response.ListAvailableSolutionStacksResponse)
        self.assertIn('32bit Amazon Linux running Tomcat 6',
                      result.solution_stacks)
class TestsWithEnvironment(unittest.TestCase):
    """Tests that need a fully built Beanstalk environment.

    The environment is created once per class (bringing one up takes
    minutes) and torn down together with the application at the end.
    """

    @classmethod
    def setUpClass(cls):
        cls.random_id = str(random.randint(1, 1000000))
        cls.app_name = 'app-' + cls.random_id
        cls.environment = 'environment-' + cls.random_id
        cls.template = 'template-' + cls.random_id
        cls.beanstalk = Layer1Wrapper()
        cls.beanstalk.create_application(application_name=cls.app_name)
        cls.beanstalk.create_configuration_template(
            application_name=cls.app_name, template_name=cls.template,
            solution_stack_name='32bit Amazon Linux running Tomcat 6')
        cls.app_version = 'version-' + cls.random_id
        cls.beanstalk.create_application_version(
            application_name=cls.app_name, version_label=cls.app_version)
        cls.beanstalk.create_environment(cls.app_name, cls.environment,
                                         template_name=cls.template)
        # Block until the new environment reaches the 'Ready' state.
        cls.wait_for_env(cls.environment)

    @classmethod
    def tearDownClass(cls):
        # terminate_env_by_force tears down the running environment too.
        cls.beanstalk.delete_application(application_name=cls.app_name,
                                         terminate_env_by_force=True)
        cls.wait_for_env(cls.environment, 'Terminated')

    @classmethod
    def wait_for_env(cls, env_name, status='Ready'):
        # NOTE(review): polls every 15s with no timeout; a stuck
        # environment hangs the whole suite.
        while not cls.env_ready(env_name, status):
            time.sleep(15)

    @classmethod
    def env_ready(cls, env_name, desired_status):
        """Return True if env_name's current status equals desired_status."""
        result = cls.beanstalk.describe_environments(
            application_name=cls.app_name, environment_names=env_name)
        status = result.environments[0].status
        return status == desired_status

    def test_describe_environment_resources(self):
        result = self.beanstalk.describe_environment_resources(
            environment_name=self.environment)
        self.assertIsInstance(
            result, response.DescribeEnvironmentResourcesResponse)

    def test_describe_configuration_settings(self):
        result = self.beanstalk.describe_configuration_settings(
            application_name=self.app_name, environment_name=self.environment)
        self.assertIsInstance(
            result, response.DescribeConfigurationSettingsResponse)

    def test_request_environment_info(self):
        result = self.beanstalk.request_environment_info(
            environment_name=self.environment, info_type='tail')
        self.assertIsInstance(result, response.RequestEnvironmentInfoResponse)
        # Wait for the environment to settle back to 'Ready' before
        # retrieving the requested logs.
        self.wait_for_env(self.environment)
        result = self.beanstalk.retrieve_environment_info(
            environment_name=self.environment, info_type='tail')
        self.assertIsInstance(result, response.RetrieveEnvironmentInfoResponse)

    def test_rebuild_environment(self):
        result = self.beanstalk.rebuild_environment(
            environment_name=self.environment)
        self.assertIsInstance(result, response.RebuildEnvironmentResponse)
        self.wait_for_env(self.environment)

    def test_restart_app_server(self):
        result = self.beanstalk.restart_app_server(
            environment_name=self.environment)
        self.assertIsInstance(result, response.RestartAppServerResponse)
        self.wait_for_env(self.environment)

    def test_update_configuration_template(self):
        result = self.beanstalk.update_configuration_template(
            application_name=self.app_name, template_name=self.template)
        self.assertIsInstance(
            result, response.UpdateConfigurationTemplateResponse)

    def test_update_environment(self):
        result = self.beanstalk.update_environment(
            environment_name=self.environment)
        self.assertIsInstance(result, response.UpdateEnvironmentResponse)
        self.wait_for_env(self.environment)
# Allow running this integration suite directly as a script.
if __name__ == '__main__':
    unittest.main()
|
calfonso/ansible | refs/heads/devel | test/units/modules/network/f5/test_bigip_virtual_address.py | 7 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_virtual_address import Parameters
from library.modules.bigip_virtual_address import ModuleManager
from library.modules.bigip_virtual_address import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_virtual_address import Parameters
from ansible.modules.network.f5.bigip_virtual_address import ModuleManager
from ansible.modules.network.f5.bigip_virtual_address import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
# Directory holding JSON fixture files, resolved relative to this test file.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}  # cache: absolute fixture path -> parsed contents
def load_fixture(name):
    """Load a fixture file by name, JSON-decoding it when possible.

    Results are cached in the module-level ``fixture_data`` dict, keyed
    by full path, so each fixture is read from disk only once.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            contents = handle.read()
        try:
            contents = json.loads(contents)
        except Exception:
            # Not JSON -- keep the raw text as-is.
            pass
        fixture_data[path] = contents
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Unit tests for the bigip_virtual_address Parameters adapter.

    Each test feeds raw module or API arguments to Parameters and checks
    the normalized attribute values it exposes.
    """

    def test_module_parameters(self):
        args = dict(
            state='present',
            address='1.1.1.1',
            netmask='2.2.2.2',
            connection_limit='10',
            arp_state='enabled',
            auto_delete='enabled',
            icmp_echo='enabled',
            advertise_route='always',
            use_route_advertisement='yes'
        )
        p = Parameters(params=args)
        assert p.state == 'present'
        assert p.address == '1.1.1.1'
        assert p.netmask == '2.2.2.2'
        # connection_limit is coerced from str to int
        assert p.connection_limit == 10
        assert p.arp_state == 'enabled'
        # auto_delete 'enabled'/'disabled' is normalized to a bool
        assert p.auto_delete is True
        assert p.icmp_echo == 'enabled'
        # NOTE(review): 'always' is expected to map to 'none' here while
        # the API fixture below maps to 'any' -- confirm against the module.
        assert p.advertise_route == 'none'
        assert p.use_route_advertisement == 'enabled'

    def test_api_parameters(self):
        # Values as returned by the BIG-IP REST API for a default address.
        args = load_fixture('load_ltm_virtual_address_default.json')
        p = Parameters(params=args)
        assert p.name == '1.1.1.1'
        assert p.address == '1.1.1.1'
        assert p.arp_state == 'enabled'
        assert p.auto_delete is True
        assert p.connection_limit == 0
        assert p.state == 'enabled'
        assert p.icmp_echo == 'enabled'
        assert p.netmask == '255.255.255.255'
        assert p.use_route_advertisement == 'disabled'
        assert p.advertise_route == 'any'

    def test_module_parameters_advertise_route_all(self):
        args = dict(
            advertise_route='when_all_available'
        )
        p = Parameters(params=args)
        assert p.advertise_route == 'all'

    def test_module_parameters_advertise_route_any(self):
        args = dict(
            advertise_route='when_any_available'
        )
        p = Parameters(params=args)
        assert p.advertise_route == 'any'

    def test_module_parameters_icmp_echo_disabled(self):
        args = dict(
            icmp_echo='disabled'
        )
        p = Parameters(params=args)
        assert p.icmp_echo == 'disabled'

    def test_module_parameters_icmp_echo_selective(self):
        args = dict(
            icmp_echo='selective'
        )
        p = Parameters(params=args)
        assert p.icmp_echo == 'selective'

    def test_module_parameters_auto_delete_disabled(self):
        args = dict(
            auto_delete='disabled'
        )
        p = Parameters(params=args)
        assert p.auto_delete is False

    def test_module_parameters_arp_state_disabled(self):
        args = dict(
            arp_state='disabled'
        )
        p = Parameters(params=args)
        assert p.arp_state == 'disabled'

    def test_module_parameters_use_route_advert_disabled(self):
        args = dict(
            use_route_advertisement='no'
        )
        p = Parameters(params=args)
        assert p.use_route_advertisement == 'disabled'

    def test_module_parameters_state_present(self):
        # 'present' implies the address is enabled
        args = dict(
            state='present'
        )
        p = Parameters(params=args)
        assert p.state == 'present'
        assert p.enabled == 'yes'

    def test_module_parameters_state_absent(self):
        args = dict(
            state='absent'
        )
        p = Parameters(params=args)
        assert p.state == 'absent'

    def test_module_parameters_state_enabled(self):
        args = dict(
            state='enabled'
        )
        p = Parameters(params=args)
        assert p.state == 'enabled'
        assert p.enabled == 'yes'

    def test_module_parameters_state_disabled(self):
        args = dict(
            state='disabled'
        )
        p = Parameters(params=args)
        assert p.state == 'disabled'
        assert p.enabled == 'no'
class TestManager(unittest.TestCase):
    """Tests for ModuleManager create/delete flows with the device API mocked."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_virtual_address(self, *args):
        # NOTE(review): *args suggests these methods were written to be used
        # with @patch decorators; none are applied here -- confirm intended.
        set_module_args(dict(
            state='present',
            address='1.1.1.1',
            netmask='2.2.2.2',
            connection_limit='10',
            arp_state='enabled',
            auto_delete='enabled',
            icmp_echo='enabled',
            advertise_route='always',
            use_route_advertisement='yes',
            password='admin',
            server='localhost',
            user='admin',
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen.
        # exists: first call (pre-check) False, second (post-create) True.
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True

    def test_delete_virtual_address(self, *args):
        set_module_args(dict(
            state='absent',
            address='1.1.1.1',
            password='admin',
            server='localhost',
            user='admin',
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen.
        # exists: True before removal, False afterwards.
        mm.exists = Mock(side_effect=[True, False])
        mm.remove_from_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
|
rafaelolg/salienpy | refs/heads/master | view_saliency.py | 1 | #!/usr/bin/env python
import cv2
import numpy
import sys
from time import time
import salienpy.frequency_tuned
import salienpy.signature
import salienpy.dictionary_frequency
from salienpy.commons import minmaxnormalization
def main(img):
    """Run each saliency detector on ``img``, display and save the results.

    img: image array as returned by cv2.imread / VideoCapture.read.
    Shows one window per method (titled with its runtime) and writes
    '<name>.png' next to the script; blocks on cv2.waitKey() at the end.
    """
    saliency_methods = [ ('dictionary_ica_saliency', salienpy.dictionary_frequency.dictionary_saliency),
                         ('frequency_tuned', salienpy.frequency_tuned.frequency_tuned_saliency),
                         ('signature', salienpy.signature.signature_saliency),
                       ]
    for name, method in saliency_methods:
        print(name)
        start = time()
        # copy() so a detector mutating its input cannot affect later methods
        sal_img = method(img.copy())
        # BUG FIX: was `t = t - time()`, which reported a *negative* elapsed
        # time in the window title; elapsed time is end minus start.
        elapsed = time() - start
        sal_img = minmaxnormalization(sal_img)
        # Invert and scale the normalized [0,1] map to an 8-bit image so
        # salient regions appear dark; compute it once for show and write.
        display = 255 - (255 * sal_img).astype('uint8')
        cv2.imshow('%s took %ss' % (name, elapsed), display)
        cv2.imwrite(name + '.png', display)
    cv2.waitKey()
if __name__ == '__main__':
    # Use the image path given on the command line, or fall back to a
    # single frame grabbed from the default camera.
    if len(sys.argv) > 1:
        img = cv2.imread(sys.argv[1])
    else:
        cam = cv2.VideoCapture(0)
        status, img = cam.read()
    main(img)
|
wujuguang/scrapy | refs/heads/master | tests/test_utils_serialize.py | 28 | import json
import unittest
import datetime
from decimal import Decimal
from twisted.internet import defer
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.http import Request, Response
class JsonEncoderTestCase(unittest.TestCase):
    """Exercise ScrapyJSONEncoder's handling of non-JSON-native types."""

    def setUp(self):
        self.encoder = ScrapyJSONEncoder()

    def test_encode_decode(self):
        when = datetime.datetime(2010, 1, 2, 10, 11, 12)
        day = datetime.date(2010, 1, 2)
        clock = datetime.time(10, 11, 12)
        amount = Decimal("1000.12")
        # (value to encode, plain structure json.dumps should match)
        cases = [
            ('foo', 'foo'),
            (day, "2010-01-02"),
            (clock, "10:11:12"),
            (when, "2010-01-02 10:11:12"),
            (amount, "1000.12"),
            (['foo', day], ['foo', "2010-01-02"]),
            ({'foo'}, ['foo']),
            ({when}, ["2010-01-02 10:11:12"]),
        ]
        for value, expected in cases:
            self.assertEqual(self.encoder.encode(value), json.dumps(expected))

    def test_encode_deferred(self):
        self.assertIn('Deferred', self.encoder.encode(defer.Deferred()))

    def test_encode_request(self):
        request = Request("http://www.example.com/lala")
        encoded = self.encoder.encode(request)
        self.assertIn(request.method, encoded)
        self.assertIn(request.url, encoded)

    def test_encode_response(self):
        resp = Response("http://www.example.com/lala")
        encoded = self.encoder.encode(resp)
        self.assertIn(resp.url, encoded)
        self.assertIn(str(resp.status), encoded)
|
KaiRo-at/socorro | refs/heads/master | socorro/unittest/external/postgresql/test_crash_data.py | 2 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.tools import eq_, ok_, assert_raises
from configman import ConfigurationManager, Namespace
from mock import Mock
from socorrolib.lib import (
MissingArgumentError,
ResourceNotFound,
ResourceUnavailable
)
from socorro.external.postgresql import crash_data, crashstorage
from socorro.unittest.testbase import TestCase
from socorro.unittest.external.postgresql.test_crashstorage import (
a_processed_crash
)
class TestIntegrationPostgresCrashData(TestCase):
    """Integration tests for the PostgreSQL-backed CrashData service.

    Requires a reachable 'socorro_integration_test' database.  setUp
    registers the weekly report partitions and stores two crashes: one
    fully processed (Peter) and one raw-only (Adrian).
    """

    def setUp(self):
        super(TestIntegrationPostgresCrashData, self).setUp()
        self.config_manager = self._common_config_setup()
        self._truncate()
        with self.config_manager.context() as config:
            store = crashstorage.PostgreSQLCrashStorage(config.database)
            # First we need to create the partitioned tables.
            connection = store.database.connection()
            cursor = connection.cursor()
            # Rows for report_partition_info: (table_name, build_order,
            # keys, indexes, fkeys, partition_column, timetype).
            table_data = (['reports', '1', '{id,uuid}',
                '{date_processed,hangid,"product,version",reason,signature,url}',
                '{}', 'date_processed', 'TIMESTAMPTZ'],
                ['plugins_reports', '2', '{"report_id,plugin_id"}',
                '{"report_id,date_processed"}',
                '{}', 'date_processed', 'TIMESTAMPTZ'],
                ['raw_crashes', '4', '{uuid}', '{}', '{}', 'date_processed',
                'TIMESTAMPTZ'],
                ['processed_crashes', '6', '{uuid}', '{}', '{}', 'date_processed',
                'TIMESTAMPTZ'])
            query = """
                INSERT INTO report_partition_info
                (table_name, build_order, keys, indexes, fkeys, partition_column,
                timetype)
                VALUES (%s, %s, %s, %s, %s, %s, %s);
            """
            cursor.executemany(query, table_data)
            connection.commit()
            # Build weekly partitions covering the two crash dates used below.
            cursor.execute("SELECT weekly_report_partitions(2, '2012-03-14');")
            cursor.execute("SELECT weekly_report_partitions(2, '2012-08-20');")
            connection.commit()
            # A complete crash report (raw, dump and processed)
            fake_raw_dump_1 = 'peter is a swede'
            fake_raw_dump_2 = 'lars is a norseman'
            fake_raw_dump_3 = 'adrian is a frenchman'
            fake_dumps = {'upload_file_minidump': fake_raw_dump_1,
                          'lars': fake_raw_dump_2,
                          'adrian': fake_raw_dump_3}
            fake_raw = {
                'name': 'Peter',
                'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314',
                'legacy_processing': 0,
                'submitted_timestamp': '2012-03-15T00:00:00',
            }
            fake_processed = a_processed_crash.copy()
            fake_processed.update({
                'name': 'Peter',
                'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314',
                'completeddatetime': '2012-03-15T00:00:00',
                'date_processed': '2012-03-15T00:00:00',
                'email': 'peter@fake.org',
            })
            store.save_raw_crash(
                fake_raw,
                fake_dumps,
                '114559a5-d8e6-428c-8b88-1c1f22120314'
            )
            store.save_processed(fake_processed)
            # A non-processed crash report
            fake_raw = {
                'name': 'Adrian',
                'uuid': '58727744-12f5-454a-bcf5-f688a2120821',
                'legacy_processing': 0,
                'submitted_timestamp': '2012-08-24'
            }
            store.save_raw_crash(
                fake_raw,
                fake_dumps,
                '58727744-12f5-454a-bcf5-f688a2120821'
            )

    def tearDown(self):
        self._truncate()
        super(TestIntegrationPostgresCrashData, self).tearDown()

    def _truncate(self):
        """Empty report_partition_info and plugins (CASCADE extends the
        truncation to dependent tables)."""
        with self.config_manager.context() as config:
            store = crashstorage.PostgreSQLCrashStorage(config.database)
            connection = store.database.connection()
            cursor = connection.cursor()
            cursor.execute("""
                TRUNCATE
                    report_partition_info,
                    plugins
                CASCADE
            """)
            connection.commit()

    def _common_config_setup(self):
        """Return a ConfigurationManager that points PostgreSQLCrashStorage
        at the integration-test database, with a mocked logger."""
        mock_logging = Mock()
        required_config = Namespace()
        required_config.namespace('database')
        required_config.database.crashstorage_class = \
            crashstorage.PostgreSQLCrashStorage
        required_config.database.add_option('logger', default=mock_logging)
        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{'database': {
                'logger': mock_logging,
                'database_name': 'socorro_integration_test',
                'database_hostname': 'localhost',
                'database_username': 'test',
                'database_password': 'aPassword',
            }}]
        )
        return config_manager

    def test_get(self):
        """Exercise CrashData.get for each datatype plus its error paths."""
        with self.config_manager.context() as config:
            priorityjobs_mock = Mock()
            service = crash_data.CrashData(
                config=config,
                all_services={'Priorityjobs': priorityjobs_mock}
            )
            params = {
                'datatype': 'raw',
                'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314'
            }
            # get a raw crash
            params['datatype'] = 'meta'
            res_expected = {
                'name': 'Peter',
                'legacy_processing': 0,
                'submitted_timestamp': '2012-03-15T00:00:00',
                'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314'
            }
            res = service.get(**params)
            eq_(res, res_expected)
            # get a processed crash
            params['datatype'] = 'processed'
            res_expected = a_processed_crash.copy()
            res_expected.update({
                'name': 'Peter',
                'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314',
                'completeddatetime': '2012-01-01T00:00:00'
            })
            res = service.get(**params)
            # the redacted (default) form must not expose sensitive fields
            eq_(res['name'], 'Peter')
            ok_('url' not in res)
            ok_('email' not in res)
            ok_('user_id' not in res)
            ok_('exploitability' not in res)
            # get a unredacted processed crash
            params['datatype'] = 'unredacted'
            res = service.get(**params)
            eq_(res['name'], 'Peter')
            ok_('url' in res)
            ok_('email' in res)
            ok_('user_id' in res)
            ok_('exploitability' in res)
            eq_(res['email'], 'peter@fake.org')
            # missing parameters
            assert_raises(
                MissingArgumentError,
                service.get
            )
            assert_raises(
                MissingArgumentError,
                service.get,
                **{'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314'}
            )
            # crash cannot be found
            assert_raises(
                ResourceNotFound,
                service.get,
                **{
                    'uuid': 'c44245f4-c93b-49b8-86a2-c15dc2130504',
                    'datatype': 'processed'
                }
            )
            # crash cannot be found
            assert_raises(
                ResourceNotFound,
                service.get,
                **{
                    'uuid': 'c44245f4-c93b-49b8-86a2-c15dc2130504',
                    'datatype': 'unredacted'
                }
            )
            # not yet available crash: requesting it must also schedule a
            # priority job for that uuid
            assert_raises(
                ResourceUnavailable,
                service.get,
                **{
                    'uuid': '58727744-12f5-454a-bcf5-f688a2120821',
                    'datatype': 'processed'
                }
            )
            priorityjobs_mock.cls.return_value.create.assert_called_once_with(
                uuid='58727744-12f5-454a-bcf5-f688a2120821'
            )
            priorityjobs_mock.cls.return_value.create.reset_mock()
            # not yet available crash
            assert_raises(
                ResourceUnavailable,
                service.get,
                **{
                    'uuid': '58727744-12f5-454a-bcf5-f688a2120821',
                    'datatype': 'unredacted'
                }
            )
            priorityjobs_mock.cls.return_value.create.assert_called_once_with(
                uuid='58727744-12f5-454a-bcf5-f688a2120821'
            )
|
sankhesh/VTK | refs/heads/master | ThirdParty/Twisted/twisted/conch/insults/__init__.py | 138 | """
Insults: a replacement for Curses/S-Lang.
Very basic at the moment."""
from twisted.python import deprecate, versions

# "colors" and "client" were superseded in Twisted 10.1.0; accessing either
# attribute of this package emits a DeprecationWarning naming the
# replacement module.
deprecate.deprecatedModuleAttribute(
    versions.Version("Twisted", 10, 1, 0),
    "Please use twisted.conch.insults.helper instead.",
    __name__, "colors")
deprecate.deprecatedModuleAttribute(
    versions.Version("Twisted", 10, 1, 0),
    "Please use twisted.conch.insults.insults instead.",
    __name__, "client")
|
arbrandes/edx-platform | refs/heads/master | openedx/core/lib/tests/test_grade_utils.py | 5 | """
Tests for graph traversal generator functions.
"""
from unittest import TestCase
import ddt
import pytest
from ..grade_utils import compare_scores, round_away_from_zero
@ddt.ddt
class TestGradeUtils(TestCase):
    """ Tests for the grade_utils module. """
    @ddt.data(
        # (earned_1, possible_1, earned_2, possible_2,
        #  treat_undefined_as_zero, expected_is_higher,
        #  expected_percentage_1, expected_percentage_2)
        (1, 2, 3, 4, False, True, 0.5, 0.75),
        (3, 4, 1, 2, False, False, 0.75, 0.5),
        (1, 2, 1, 2, False, True, 0.5, 0.5),
        (1, 1, 0, 1, False, False, 1, 0),
    )
    @ddt.unpack
    def test_compare_scores_happy_path(
        self, earned_1, possible_1, earned_2, possible_2, treat_undefined_as_zero,
        expected_is_higher, expected_percentage_1, expected_percentage_2
    ):
        is_higher, percentage_1, percentage_2 = compare_scores(
            earned_1, possible_1, earned_2, possible_2, treat_undefined_as_zero
        )
        assert expected_is_higher == is_higher
        assert expected_percentage_1 == percentage_1
        assert expected_percentage_2 == percentage_2

    def test_compare_scores_raise_zero_division(self):
        # A zero "possible" score must propagate ZeroDivisionError when
        # treat_undefined_as_zero is left at its default.
        with pytest.raises(ZeroDivisionError):
            compare_scores(1, 0, 1, 2)
        with pytest.raises(ZeroDivisionError):
            compare_scores(1, 2, 0, 0)

    def test_compare_scores_treat_undefined_as_zero(self):
        is_higher, percentage_1, percentage_2 = compare_scores(
            0, 0, 0, 0, treat_undefined_as_zero=True
        )
        # 0/0 is treated as 0%, and equal percentages count as "higher".
        assert is_higher is True
        assert 0 == percentage_1
        assert 0 == percentage_2

    @ddt.data(
        # (precise, expected_rounded_number[, rounding_precision])
        (0.5, 1),
        (1.49, 1),
        (1.5, 2),
        (1.51, 2),
        (2.5, 3),
        (1.45, 1.5, 1),
        (-0.5, -1.0),
        (-1.5, -2.0),
        (-2.5, -3.0),
        (-0.1, -0.0),
        (0.1, 0.0),
        (0.0, 0.0)
    )
    @ddt.unpack
    def test_round_away_from_zero(self, precise, expected_rounded_number, rounding_precision=0):
        # Halves round away from zero in both directions (0.5 -> 1,
        # -0.5 -> -1), unlike Python's default bankers' rounding.
        assert round_away_from_zero(precise, rounding_precision) == expected_rounded_number
|
enomado/beaker | refs/heads/master | tests/test_namespacing.py | 3 | import os
import sys
def teardown():
    """Best-effort removal of the cache directory created by the tests."""
    from shutil import rmtree
    rmtree('./cache', ignore_errors=True)
def test_consistent_namespacing():
    # Make this file's directory importable so the helper can be loaded by
    # its fully qualified name -- presumably 'tests' is a package rooted
    # here; verify against the repository layout.
    sys.path.append(os.path.dirname(__file__))
    from tests.test_namespacing_files.namespace_go import go
    go()
|
avilaton/transitfeed | refs/heads/master | extensions/__init__.py | 8 | #!/usr/bin/python2.5
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file marks the base folder of the FeedValidator extensions as a Python
# module. This module, however, is never used directly. So no imports are needed
# here. |
odoomrp/server-tools | refs/heads/8.0 | auth_dynamic_groups/model/res_groups.py | 23 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions
from openerp.tools.safe_eval import safe_eval
from openerp import _
class res_groups(models.Model):
    """Extend res.groups with "dynamic" groups whose membership is computed
    from a python condition instead of being assigned manually."""
    _inherit = 'res.groups'

    # Marks the group as dynamically evaluated.
    is_dynamic = fields.Boolean('Dynamic')
    dynamic_group_condition = fields.Text(
        'Condition', help='The condition to be met for a user to be a '
        'member of this group. It is evaluated as python code at login '
        'time, you get `user` passed as a browse record')

    @api.multi
    def eval_dynamic_group_condition(self, uid=None):
        """Return True if the user satisfies the condition of *every* group
        in this recordset.

        :param uid: optional user id; defaults to the current user.
        """
        user = self.env['res.users'].browse([uid]) if uid else self.env.user
        # safe_eval with a restricted namespace; `user` is sudo'ed so the
        # condition can read fields the user itself may not be allowed to.
        result = all(
            self.mapped(
                lambda this: safe_eval(
                    this.dynamic_group_condition or 'False',
                    {
                        'user': user.sudo(),
                        'any': any,
                        'all': all,
                        'filter': filter,
                    })))
        return result

    @api.multi
    @api.constrains('dynamic_group_condition')
    def _check_dynamic_group_condition(self):
        """Reject conditions that fail to evaluate at all for the current
        user (unknown names, syntax errors, bad types)."""
        try:
            self.filtered('is_dynamic').eval_dynamic_group_condition()
        except (NameError, SyntaxError, TypeError):
            raise exceptions.ValidationError(
                _('The condition doesn\'t evaluate correctly!'))

    @api.multi
    def action_evaluate(self):
        """Recompute dynamic group membership for every user.

        NOTE(review): iterates over *all* users, so this can be slow on
        databases with many users.
        """
        res_users = self.env['res.users']
        for user in res_users.search([]):
            res_users.update_dynamic_groups(user.id, self.env.cr.dbname)
|
weblyzard/ewrt | refs/heads/develop | tests/access/test_db.py | 1 | # #!/usr/bin/python
# # -*- coding: utf-8 -*-
# import unittest
#
# from nose.plugins.attrib import attr
#
# from eWRT.access.db import PostgresqlDb
#
#
# class TestDB(unittest.TestCase):
# """ @class TestDB
# db test cases
# """
# @attr("db")
# def test_context_protocol(self):
# """ tests the db module's support for the context protocol """
# from eWRT.config import DATABASE_CONNECTION
# with PostgresqlDb( **DATABASE_CONNECTION['wikipedia'] ) as q:
# assert len( q.query("SELECT * FROM concept LIMIT 5")) == 5
#
# @attr("db")
# def test_multi_processing(self):
# """ tests multiprocessing """
# from multiprocessing import Pool
# p = Pool(4)
# qq = 8 * ["SELECT * FROM concept LIMIT 1"]
#
# res = p.map(t_multiprocessing, qq)
#
#
# def t_multiprocessing(q):
# """ @remarks
# helper function for the multi processing test case
# """
# from eWRT.config import DATABASE_CONNECTION
#
# db = PostgresqlDb( **DATABASE_CONNECTION['wikipedia'] )
# r = db.query( q )
# db.close()
# return r
#
# if __name__ == '__main__':
# unittest.main() |
gnieboer/gnuradio | refs/heads/android | gr-filter/python/filter/__init__.py | 47 | #
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Filter blocks and related functions.
'''
import os

# Import the SWIG-generated bindings.  When running from the build tree
# the module is not importable from this package directly, so extend the
# package path with the sibling "swig" build directory and retry.
try:
    from filter_swig import *
except ImportError:
    dirname, filename = os.path.split(os.path.abspath(__file__))
    __path__.append(os.path.join(dirname, "..", "..", "swig"))
    from filter_swig import *

# Pure-python companions re-exported from this package.
from filterbank import *
from freq_xlating_fft_filter import *
from rational_resampler import *
import pfb
import optfir

# Pull this into the filter module
from gnuradio.fft import window
|
openpli-arm/enigma2-arm | refs/heads/master | tools/host_tools/FormatConverter/main.py | 44 | #!/usr/bin/python
from datasource import genericdatasource
from satxml import satxml
from lamedb import lamedb
from input import *
import sys
import os
# Build the list of datasources: the generic holder first, then one
# instance per supported file format.
maindata = genericdatasource()
sources = [satxml, lamedb]
datasources = [maindata]
for source in sources:
    datasources.append(source())
# Let every datasource know about its peers so data can be exchanged.
for source in datasources:
    source.setDatasources(datasources)
# Outer loop: pick a datasource from the menu; 'q' quits.
while(True):
    os.system("/usr/bin/clear")
    list = []  # NOTE(review): shadows the builtin 'list'
    for index in range(len(datasources)):
        list.append(datasources[index].getName() + (" (%d sats)" % len(datasources[index].transponderlist.keys())))
    index = inputChoices(list, "q", "quit")
    if index is None:
        break
    # Inner loop: run actions of the chosen datasource until the user
    # backs out of the action menu.
    while(True):
        print datasources[index].getStatus()
        list = []
        for action in datasources[index].getCapabilities():
            list.append(action[0])
        action = inputChoices(list)
        if action is None:
            break
        # Capabilities are (label, callable) pairs; invoke the chosen one.
        datasources[index].getCapabilities()[action][1]()
        #except:
        #    print sys.exc_info()
        #    print "sorry, could not execute that command"
|
rleigh-dundee/openmicroscopy | refs/heads/develop | components/tools/OmeroPy/test/scriptstest/coverage.py | 4 | #!/usr/bin/env python
"""
Integration test demonstrating various script creation methods
Copyright 2010 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import integration.library as lib
import unittest, os, sys
import omero
import omero.rtypes as OR
class TestCoverage(lib.ITest):
    """Integration checks for the OMERO script service.

    Runs against a live server via lib.ITest; ``rs`` is the root user's
    script service and ``us`` the freshly created test user's.
    """

    def setUp(self):
        """
        getScripts returns official scripts, several of which are shipped with OMERO.
        """
        lib.ITest.setUp(self)
        self.rs = self.root.sf.getScriptService()
        self.us = self.client.sf.getScriptService()
        self.assert_( len(self.rs.getScripts()) > 0 )
        self.assert_( len(self.us.getScripts()) > 0 )
        self.assertEquals(0, len(self.us.getUserScripts([]))) # New user. No scripts

    def testGetScriptWithDetails(self):
        scriptList = self.us.getScripts()
        script = scriptList[0]
        scriptMap = self.us.getScriptWithDetails(script.id.val)
        # Exactly one entry: script text mapped to its script object.
        self.assertEquals(1, len(scriptMap))
        scriptText = scriptMap.keys()[0]
        scriptObj = scriptMap.values()[0]

    def testUploadAndScript(self):
        # Upload a user (non-official) script and hand back its id.
        scriptID = self.us.uploadScript("/OME/Foo.py", """if True:
            import omero
            import omero.grid as OG
            import omero.rtypes as OR
            import omero.scripts as OS
            client = OS.client("testUploadScript")
            print "done"
        """)
        return scriptID

    def testUserCantUploadOfficalScript(self):
        # A regular user must get a SecurityViolation when attempting to
        # upload an *official* script.
        self.assertRaises(omero.SecurityViolation,\
            self.us.uploadOfficialScript,\
            "/%s/fails.py" % self.uuid(), """if True:
            import omero
            """)
# Allow running this integration module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
Innovahn/odoo.old | refs/heads/master | addons/marketing_crm/__openerp__.py | 313 | # -*- coding: utf-8 -*-
{
'name': 'Marketing in CRM',
'version': '1.0',
'depends': ['marketing', 'crm'],
'author': 'OpenERP SA',
'category': 'Hidden/Dependency',
'description': """
Bridge module between marketing and CRM
""",
'website': 'https://www.odoo.com/page/crm',
'data': [
'views/crm.xml',
'views/res_config.xml',
],
'demo': [],
'installable': True,
'auto_install': True,
}
|
xindus40223115/2015cda_g1 | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/xml/dom/minidom.py | 727 | """Simple implementation of the Level 1 DOM.
Namespaces and other minor Level 2 features are also supported.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import io
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
# the node being added or removed, not the node being modified.)
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
    """Base class for all minidom nodes.

    Implements child-list manipulation (insertBefore / appendChild /
    replaceChild / removeChild), text-node normalization, the DOM
    Level 3 user-data hooks, and the minidom-specific ``unlink()``
    cycle breaker (also exposed via the context-manager protocol).
    """
    namespaceURI = None # this is non-null only for elements and attributes
    parentNode = None
    ownerDocument = None
    nextSibling = None
    previousSibling = None

    prefix = EMPTY_PREFIX # non-null only for NS elements and attributes

    def __bool__(self):
        # A node is always truthy, even when it has no children.
        return True

    def toxml(self, encoding=None):
        # Plain serialization is pretty-printing with empty indent/newline.
        return self.toprettyxml("", "", encoding)

    def toprettyxml(self, indent="\t", newl="\n", encoding=None):
        """Serialize this node; returns str, or bytes when *encoding* is given."""
        if encoding is None:
            writer = io.StringIO()
        else:
            # xmlcharrefreplace: characters unrepresentable in the target
            # encoding are written as numeric character references.
            writer = io.TextIOWrapper(io.BytesIO(),
                                      encoding=encoding,
                                      errors="xmlcharrefreplace",
                                      newline='\n')
        if self.nodeType == Node.DOCUMENT_NODE:
            # Can pass encoding only to document, to put it into XML header
            self.writexml(writer, "", indent, newl, encoding)
        else:
            self.writexml(writer, "", indent, newl)
        if encoding is None:
            return writer.getvalue()
        else:
            return writer.detach().getvalue()

    def hasChildNodes(self):
        return bool(self.childNodes)

    def _get_childNodes(self):
        return self.childNodes

    def _get_firstChild(self):
        # Implicitly returns None when there are no children.
        if self.childNodes:
            return self.childNodes[0]

    def _get_lastChild(self):
        if self.childNodes:
            return self.childNodes[-1]

    def insertBefore(self, newChild, refChild):
        """Insert *newChild* before *refChild* (append when refChild is None).

        Document fragments are spliced in child by child.  Raises
        HierarchyRequestErr for illegal child types and NotFoundErr when
        *refChild* is not a child of this node.
        """
        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            for c in tuple(newChild.childNodes):
                self.insertBefore(c, refChild)
            ### The DOM does not clearly specify what to return in this case
            return newChild
        if newChild.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(newChild), repr(self)))
        if newChild.parentNode is not None:
            # Re-parenting: detach from the previous parent first.
            newChild.parentNode.removeChild(newChild)
        if refChild is None:
            self.appendChild(newChild)
        else:
            try:
                index = self.childNodes.index(refChild)
            except ValueError:
                raise xml.dom.NotFoundErr()
            if newChild.nodeType in _nodeTypes_with_children:
                _clear_id_cache(self)
            self.childNodes.insert(index, newChild)
            # Re-wire the doubly linked sibling pointers around the
            # insertion point.
            newChild.nextSibling = refChild
            refChild.previousSibling = newChild
            if index:
                node = self.childNodes[index-1]
                node.nextSibling = newChild
                newChild.previousSibling = node
            else:
                newChild.previousSibling = None
            newChild.parentNode = self
        return newChild

    def appendChild(self, node):
        """Append *node* as the last child and return it."""
        if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            for c in tuple(node.childNodes):
                self.appendChild(c)
            ### The DOM does not clearly specify what to return in this case
            return node
        if node.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        elif node.nodeType in _nodeTypes_with_children:
            _clear_id_cache(self)
        if node.parentNode is not None:
            node.parentNode.removeChild(node)
        _append_child(self, node)
        node.nextSibling = None
        return node

    def replaceChild(self, newChild, oldChild):
        """Replace *oldChild* with *newChild*; returns the removed child."""
        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            # Remember the insertion point before removing oldChild.
            refChild = oldChild.nextSibling
            self.removeChild(oldChild)
            return self.insertBefore(newChild, refChild)
        if newChild.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(newChild), repr(self)))
        if newChild is oldChild:
            # Replacing a node with itself is a no-op (returns None).
            return
        if newChild.parentNode is not None:
            newChild.parentNode.removeChild(newChild)
        try:
            index = self.childNodes.index(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        self.childNodes[index] = newChild
        newChild.parentNode = self
        oldChild.parentNode = None
        if (newChild.nodeType in _nodeTypes_with_children
            or oldChild.nodeType in _nodeTypes_with_children):
            _clear_id_cache(self)
        # Transfer oldChild's sibling links onto newChild, then clear them.
        newChild.nextSibling = oldChild.nextSibling
        newChild.previousSibling = oldChild.previousSibling
        oldChild.nextSibling = None
        oldChild.previousSibling = None
        if newChild.previousSibling:
            newChild.previousSibling.nextSibling = newChild
        if newChild.nextSibling:
            newChild.nextSibling.previousSibling = newChild
        return oldChild

    def removeChild(self, oldChild):
        """Detach *oldChild* from this node and return it."""
        try:
            self.childNodes.remove(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        if oldChild.nextSibling is not None:
            oldChild.nextSibling.previousSibling = oldChild.previousSibling
        if oldChild.previousSibling is not None:
            oldChild.previousSibling.nextSibling = oldChild.nextSibling
        oldChild.nextSibling = oldChild.previousSibling = None
        if oldChild.nodeType in _nodeTypes_with_children:
            _clear_id_cache(self)
        oldChild.parentNode = None
        return oldChild

    def normalize(self):
        """Merge adjacent text nodes and drop empty ones, recursively."""
        L = []
        for child in self.childNodes:
            if child.nodeType == Node.TEXT_NODE:
                if not child.data:
                    # empty text node; discard
                    if L:
                        L[-1].nextSibling = child.nextSibling
                    if child.nextSibling:
                        child.nextSibling.previousSibling = child.previousSibling
                    child.unlink()
                elif L and L[-1].nodeType == child.nodeType:
                    # collapse text node
                    node = L[-1]
                    node.data = node.data + child.data
                    node.nextSibling = child.nextSibling
                    if child.nextSibling:
                        child.nextSibling.previousSibling = node
                    child.unlink()
                else:
                    L.append(child)
            else:
                L.append(child)
                if child.nodeType == Node.ELEMENT_NODE:
                    child.normalize()
        self.childNodes[:] = L

    def cloneNode(self, deep):
        return _clone_node(self, deep, self.ownerDocument or self)

    def isSupported(self, feature, version):
        return self.ownerDocument.implementation.hasFeature(feature, version)

    def _get_localName(self):
        # Overridden in Element and Attr where localName can be Non-Null
        return None

    # Node interfaces from Level 3 (WD 9 April 2002)

    def isSameNode(self, other):
        return self is other

    def getInterface(self, feature):
        if self.isSupported(feature, None):
            return self
        else:
            return None

    # The "user data" functions use a dictionary that is only present
    # if some user data has been set, so be careful not to assume it
    # exists.

    def getUserData(self, key):
        try:
            return self._user_data[key][0]
        except (AttributeError, KeyError):
            return None

    def setUserData(self, key, data, handler):
        """Attach (data, handler) under *key*; returns the previous data."""
        old = None
        try:
            d = self._user_data
        except AttributeError:
            d = {}
            self._user_data = d
        if key in d:
            old = d[key][0]
        if data is None:
            # ignore handlers passed for None
            handler = None
            if old is not None:
                del d[key]
        else:
            d[key] = (data, handler)
        return old

    def _call_user_data_handler(self, operation, src, dst):
        # Snapshot items() so handlers may mutate the user-data dict.
        if hasattr(self, "_user_data"):
            for key, (data, handler) in list(self._user_data.items()):
                if handler is not None:
                    handler.handle(operation, key, data, src, dst)

    # minidom-specific API:

    def unlink(self):
        """Break reference cycles so the tree can be reclaimed promptly."""
        self.parentNode = self.ownerDocument = None
        if self.childNodes:
            for child in self.childNodes:
                child.unlink()
            self.childNodes = NodeList()
        self.previousSibling = None
        self.nextSibling = None

    # A Node is its own context manager, to ensure that an unlink() call occurs.
    # This is similar to how a file object works.
    def __enter__(self):
        return self

    def __exit__(self, et, ev, tb):
        self.unlink()
# Read-only properties backed by the _get_* accessors defined above.
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.previousSibling = last
last.nextSibling = node
childNodes.append(node)
node.parentNode = self
def _in_document(node):
    """Return True iff *node* is attached to a Document tree."""
    # Walk parent links upward until a Document node is found or the
    # chain runs out.
    current = node
    while current is not None:
        if current.nodeType == Node.DOCUMENT_NODE:
            return True
        current = current.parentNode
    return False
def _write_data(writer, data):
"Writes datachars to writer."
if data:
data = data.replace("&", "&").replace("<", "<"). \
replace("\"", """).replace(">", ">")
writer.write(data)
def _get_elements_by_tagName_helper(parent, name, rc):
    """Collect descendant elements of *parent* matching *name* into *rc*.

    A name of "*" matches every element.  The traversal recurses into
    every child (non-elements simply have no children to visit).
    Returns *rc* for convenience.
    """
    for child in parent.childNodes:
        matches = (child.nodeType == Node.ELEMENT_NODE
                   and (name == "*" or child.tagName == name))
        if matches:
            rc.append(child)
        _get_elements_by_tagName_helper(child, name, rc)
    return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
    """Collect descendant elements matching (nsURI, localName) into *rc*.

    "*" acts as a wildcard for either component.  Unlike the non-NS
    helper, recursion only descends through element children.
    Returns *rc* for convenience.
    """
    for child in parent.childNodes:
        if child.nodeType != Node.ELEMENT_NODE:
            continue
        name_ok = localName == "*" or child.localName == localName
        ns_ok = nsURI == "*" or child.namespaceURI == nsURI
        if name_ok and ns_ok:
            rc.append(child)
        _get_elements_by_tagName_ns_helper(child, nsURI, localName, rc)
    return rc
class DocumentFragment(Node):
    """Lightweight container node.

    When a fragment is inserted into a tree, its children are spliced
    in individually (see Node.insertBefore/appendChild).
    """
    nodeType = Node.DOCUMENT_FRAGMENT_NODE
    nodeName = "#document-fragment"
    nodeValue = None
    attributes = None
    parentNode = None
    _child_node_types = (Node.ELEMENT_NODE,
                         Node.TEXT_NODE,
                         Node.CDATA_SECTION_NODE,
                         Node.ENTITY_REFERENCE_NODE,
                         Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE,
                         Node.NOTATION_NODE)

    def __init__(self):
        self.childNodes = NodeList()
class Attr(Node):
    """DOM Attribute node.

    The attribute's value is mirrored in a single Text child node.
    Mutating the name or value invalidates the owning element's ID
    cache, since the attribute may be (or become) an ID attribute.
    """
    __slots__=('_name', '_value', 'namespaceURI',
               '_prefix', 'childNodes', '_localName', 'ownerDocument', 'ownerElement')
    nodeType = Node.ATTRIBUTE_NODE
    attributes = None
    specified = False
    _is_id = False

    _child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)

    def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
                 prefix=None):
        self.ownerElement = None
        self._name = qName
        self.namespaceURI = namespaceURI
        self._prefix = prefix
        self.childNodes = NodeList()

        # Add the single child node that represents the value of the attr
        self.childNodes.append(Text())

        # nodeValue and value are set elsewhere

    def _get_localName(self):
        try:
            return self._localName
        except AttributeError:
            # Fall back to the part after the namespace prefix.
            return self.nodeName.split(":", 1)[-1]

    # NOTE(review): this _get_name is shadowed by the identical-purpose
    # definition below; kept as-is to preserve the original source.
    def _get_name(self):
        return self.name

    def _get_specified(self):
        return self.specified

    def _get_name(self):
        return self._name

    def _set_name(self, value):
        self._name = value
        if self.ownerElement is not None:
            _clear_id_cache(self.ownerElement)

    nodeName = name = property(_get_name, _set_name)

    def _get_value(self):
        return self._value

    def _set_value(self, value):
        self._value = value
        self.childNodes[0].data = value
        if self.ownerElement is not None:
            _clear_id_cache(self.ownerElement)
        # NOTE(review): this second assignment is redundant with the one
        # above; kept to preserve the original source.
        self.childNodes[0].data = value

    nodeValue = value = property(_get_value, _set_value)

    def _get_prefix(self):
        return self._prefix

    def _set_prefix(self, prefix):
        nsuri = self.namespaceURI
        if prefix == "xmlns":
            # The 'xmlns' prefix is reserved for namespace declarations.
            if nsuri and nsuri != XMLNS_NAMESPACE:
                raise xml.dom.NamespaceErr(
                    "illegal use of 'xmlns' prefix for the wrong namespace")
        self._prefix = prefix
        if prefix is None:
            newName = self.localName
        else:
            newName = "%s:%s" % (prefix, self.localName)
        if self.ownerElement:
            _clear_id_cache(self.ownerElement)
        self.name = newName

    prefix = property(_get_prefix, _set_prefix)

    def unlink(self):
        # This implementation does not call the base implementation
        # since most of that is not needed, and the expense of the
        # method call is not warranted.  We duplicate the removal of
        # children, but that's all we needed from the base class.
        elem = self.ownerElement
        if elem is not None:
            del elem._attrs[self.nodeName]
            del elem._attrsNS[(self.namespaceURI, self.localName)]
            if self._is_id:
                self._is_id = False
                elem._magic_id_nodes -= 1
                self.ownerDocument._magic_id_count -= 1
        for child in self.childNodes:
            child.unlink()
        del self.childNodes[:]

    def _get_isId(self):
        # True when set explicitly (setIdAttribute*) or declared in the
        # document's schema/DTD information.
        if self._is_id:
            return True
        doc = self.ownerDocument
        elem = self.ownerElement
        if doc is None or elem is None:
            return False
        info = doc._get_elem_info(elem)
        if info is None:
            return False
        if self.namespaceURI:
            return info.isIdNS(self.namespaceURI, self.localName)
        else:
            return info.isId(self.nodeName)

    def _get_schemaType(self):
        doc = self.ownerDocument
        elem = self.ownerElement
        if doc is None or elem is None:
            return _no_type
        info = doc._get_elem_info(elem)
        if info is None:
            return _no_type
        if self.namespaceURI:
            return info.getAttributeTypeNS(self.namespaceURI, self.localName)
        else:
            return info.getAttributeType(self.nodeName)
# Read-only properties backed by Attr's _get_* accessors.
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
    """The attribute list is a transient interface to the underlying
    dictionaries.  Mutations here will change the underlying element's
    dictionary.

    Ordering is imposed artificially and does not reflect the order of
    attributes as found in an input document.
    """

    __slots__ = ('_attrs', '_attrsNS', '_ownerElement')

    def __init__(self, attrs, attrsNS, ownerElement):
        # _attrs maps name -> Attr; _attrsNS maps (nsURI, localName) -> Attr.
        self._attrs = attrs
        self._attrsNS = attrsNS
        self._ownerElement = ownerElement

    def _get_length(self):
        return len(self._attrs)

    def item(self, index):
        # DOM item(): out-of-range index yields None, not an exception.
        try:
            return self[list(self._attrs.keys())[index]]
        except IndexError:
            return None

    def items(self):
        L = []
        for node in self._attrs.values():
            L.append((node.nodeName, node.value))
        return L

    def itemsNS(self):
        L = []
        for node in self._attrs.values():
            L.append(((node.namespaceURI, node.localName), node.value))
        return L

    def __contains__(self, key):
        # Plain string keys probe by name; tuple keys by (nsURI, localName).
        if isinstance(key, str):
            return key in self._attrs
        else:
            return key in self._attrsNS

    def keys(self):
        return self._attrs.keys()

    def keysNS(self):
        return self._attrsNS.keys()

    def values(self):
        return self._attrs.values()

    def get(self, name, value=None):
        return self._attrs.get(name, value)

    __len__ = _get_length

    def _cmp(self, other):
        # Maps are equal iff they share the same underlying dict;
        # otherwise order arbitrarily (but consistently) by id().
        if self._attrs is getattr(other, "_attrs", None):
            return 0
        else:
            return (id(self) > id(other)) - (id(self) < id(other))

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __ne__(self, other):
        return self._cmp(other) != 0

    def __getitem__(self, attname_or_tuple):
        if isinstance(attname_or_tuple, tuple):
            return self._attrsNS[attname_or_tuple]
        else:
            return self._attrs[attname_or_tuple]

    # same as set
    def __setitem__(self, attname, value):
        if isinstance(value, str):
            # Assigning a string updates (or creates) the named Attr.
            try:
                node = self._attrs[attname]
            except KeyError:
                node = Attr(attname)
                node.ownerDocument = self._ownerElement.ownerDocument
                self.setNamedItem(node)
            node.value = value
        else:
            if not isinstance(value, Attr):
                raise TypeError("value must be a string or Attr object")
            node = value
            self.setNamedItem(node)

    def getNamedItem(self, name):
        try:
            return self._attrs[name]
        except KeyError:
            return None

    def getNamedItemNS(self, namespaceURI, localName):
        try:
            return self._attrsNS[(namespaceURI, localName)]
        except KeyError:
            return None

    def removeNamedItem(self, name):
        n = self.getNamedItem(name)
        if n is not None:
            _clear_id_cache(self._ownerElement)
            del self._attrs[n.nodeName]
            del self._attrsNS[(n.namespaceURI, n.localName)]
            if hasattr(n, 'ownerElement'):
                n.ownerElement = None
            return n
        else:
            raise xml.dom.NotFoundErr()

    def removeNamedItemNS(self, namespaceURI, localName):
        n = self.getNamedItemNS(namespaceURI, localName)
        if n is not None:
            _clear_id_cache(self._ownerElement)
            del self._attrsNS[(n.namespaceURI, n.localName)]
            del self._attrs[n.nodeName]
            if hasattr(n, 'ownerElement'):
                n.ownerElement = None
            return n
        else:
            raise xml.dom.NotFoundErr()

    def setNamedItem(self, node):
        """Install *node*; returns the Attr it displaced, if any."""
        if not isinstance(node, Attr):
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        old = self._attrs.get(node.name)
        if old:
            old.unlink()
        self._attrs[node.name] = node
        self._attrsNS[(node.namespaceURI, node.localName)] = node
        node.ownerElement = self._ownerElement
        _clear_id_cache(node.ownerElement)
        return old

    def setNamedItemNS(self, node):
        return self.setNamedItem(node)

    def __delitem__(self, attname_or_tuple):
        node = self[attname_or_tuple]
        _clear_id_cache(node.ownerElement)
        node.unlink()

    def __getstate__(self):
        return self._attrs, self._attrsNS, self._ownerElement

    def __setstate__(self, state):
        self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
            doc="Number of nodes in the NamedNodeMap.")

# Historical alias kept for backward compatibility.
AttributeList = NamedNodeMap
class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
else:
return "<TypeInfo %r>" % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
    """DOM Element node.

    Attributes are double-indexed in two lazily created dicts
    (by qualified name and by (namespaceURI, localName)); both always
    refer to the same Attr objects.
    """
    __slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix',
               'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS',
               'nextSibling', 'previousSibling')
    nodeType = Node.ELEMENT_NODE
    nodeValue = None
    schemaType = _no_type

    _magic_id_nodes = 0

    _child_node_types = (Node.ELEMENT_NODE,
                         Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE,
                         Node.TEXT_NODE,
                         Node.CDATA_SECTION_NODE,
                         Node.ENTITY_REFERENCE_NODE)

    def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
                 localName=None):
        self.parentNode = None
        self.tagName = self.nodeName = tagName
        self.prefix = prefix
        self.namespaceURI = namespaceURI
        self.childNodes = NodeList()
        self.nextSibling = self.previousSibling = None

        # Attribute dictionaries are lazily created
        # attributes are double-indexed:
        #    tagName -> Attribute
        #    URI,localName -> Attribute
        # in the future: consider lazy generation
        # of attribute objects this is too tricky
        # for now because of headaches with
        # namespaces.
        self._attrs = None
        self._attrsNS = None

    def _ensure_attributes(self):
        # Create both attribute dicts on first use.
        if self._attrs is None:
            self._attrs = {}
            self._attrsNS = {}

    def _get_localName(self):
        try:
            return self._localName
        except AttributeError:
            return self.tagName.split(":", 1)[-1]

    def _get_tagName(self):
        return self.tagName

    def unlink(self):
        # Unlink attributes first, then let Node.unlink handle children
        # and sibling/parent pointers.
        if self._attrs is not None:
            for attr in list(self._attrs.values()):
                attr.unlink()
        self._attrs = None
        self._attrsNS = None
        Node.unlink(self)

    def getAttribute(self, attname):
        """Return the attribute value, or "" when absent (per DOM)."""
        if self._attrs is None:
            return ""
        try:
            return self._attrs[attname].value
        except KeyError:
            return ""

    def getAttributeNS(self, namespaceURI, localName):
        if self._attrsNS is None:
            return ""
        try:
            return self._attrsNS[(namespaceURI, localName)].value
        except KeyError:
            return ""

    def setAttribute(self, attname, value):
        """Set a plain (non-namespaced) attribute from a string."""
        attr = self.getAttributeNode(attname)
        if attr is None:
            attr = Attr(attname)
            attr.value = value # also sets nodeValue
            attr.ownerDocument = self.ownerDocument
            self.setAttributeNode(attr)
        elif value != attr.value:
            attr.value = value
            if attr.isId:
                _clear_id_cache(self)

    def setAttributeNS(self, namespaceURI, qualifiedName, value):
        """Set a namespaced attribute; updates prefix/name if needed."""
        prefix, localname = _nssplit(qualifiedName)
        attr = self.getAttributeNodeNS(namespaceURI, localname)
        if attr is None:
            attr = Attr(qualifiedName, namespaceURI, localname, prefix)
            attr.value = value
            attr.ownerDocument = self.ownerDocument
            self.setAttributeNode(attr)
        else:
            if value != attr.value:
                attr.value = value
                if attr.isId:
                    _clear_id_cache(self)
            if attr.prefix != prefix:
                attr.prefix = prefix
                attr.nodeName = qualifiedName

    def getAttributeNode(self, attrname):
        if self._attrs is None:
            return None
        return self._attrs.get(attrname)

    def getAttributeNodeNS(self, namespaceURI, localName):
        if self._attrsNS is None:
            return None
        return self._attrsNS.get((namespaceURI, localName))

    def setAttributeNode(self, attr):
        """Install an Attr node; returns any Attr it displaced."""
        if attr.ownerElement not in (None, self):
            raise xml.dom.InuseAttributeErr("attribute node already owned")
        self._ensure_attributes()
        # The new attr may displace an entry in either index; remove
        # both potential victims before installing.
        old1 = self._attrs.get(attr.name, None)
        if old1 is not None:
            self.removeAttributeNode(old1)
        old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
        if old2 is not None and old2 is not old1:
            self.removeAttributeNode(old2)
        _set_attribute_node(self, attr)

        if old1 is not attr:
            # It might have already been part of this node, in which case
            # it doesn't represent a change, and should not be returned.
            return old1
        if old2 is not attr:
            return old2

    setAttributeNodeNS = setAttributeNode

    def removeAttribute(self, name):
        if self._attrsNS is None:
            raise xml.dom.NotFoundErr()
        try:
            attr = self._attrs[name]
        except KeyError:
            raise xml.dom.NotFoundErr()
        self.removeAttributeNode(attr)

    def removeAttributeNS(self, namespaceURI, localName):
        if self._attrsNS is None:
            raise xml.dom.NotFoundErr()
        try:
            attr = self._attrsNS[(namespaceURI, localName)]
        except KeyError:
            raise xml.dom.NotFoundErr()
        self.removeAttributeNode(attr)

    def removeAttributeNode(self, node):
        if node is None:
            raise xml.dom.NotFoundErr()
        try:
            self._attrs[node.name]
        except KeyError:
            raise xml.dom.NotFoundErr()
        _clear_id_cache(self)
        # Attr.unlink removes the node from both attribute dicts.
        node.unlink()
        # Restore this since the node is still useful and otherwise
        # unlinked
        node.ownerDocument = self.ownerDocument

    removeAttributeNodeNS = removeAttributeNode

    def hasAttribute(self, name):
        if self._attrs is None:
            return False
        return name in self._attrs

    def hasAttributeNS(self, namespaceURI, localName):
        if self._attrsNS is None:
            return False
        return (namespaceURI, localName) in self._attrsNS

    def getElementsByTagName(self, name):
        return _get_elements_by_tagName_helper(self, name, NodeList())

    def getElementsByTagNameNS(self, namespaceURI, localName):
        return _get_elements_by_tagName_ns_helper(
            self, namespaceURI, localName, NodeList())

    def __repr__(self):
        return "<DOM Element: %s at %#x>" % (self.tagName, id(self))

    def writexml(self, writer, indent="", addindent="", newl=""):
        # indent = current indentation
        # addindent = indentation to add to higher levels
        # newl = newline string
        writer.write(indent+"<" + self.tagName)

        attrs = self._get_attributes()
        # Attributes are emitted in sorted-name order for determinism.
        a_names = sorted(attrs.keys())

        for a_name in a_names:
            writer.write(" %s=\"" % a_name)
            _write_data(writer, attrs[a_name].value)
            writer.write("\"")
        if self.childNodes:
            writer.write(">")
            if (len(self.childNodes) == 1 and
                self.childNodes[0].nodeType == Node.TEXT_NODE):
                # Keep a lone text child on the same line as its tags.
                self.childNodes[0].writexml(writer, '', '', '')
            else:
                writer.write(newl)
                for node in self.childNodes:
                    node.writexml(writer, indent+addindent, addindent, newl)
                writer.write(indent)
            writer.write("</%s>%s" % (self.tagName, newl))
        else:
            writer.write("/>%s"%(newl))

    def _get_attributes(self):
        self._ensure_attributes()
        return NamedNodeMap(self._attrs, self._attrsNS, self)

    def hasAttributes(self):
        if self._attrs:
            return True
        else:
            return False

    # DOM Level 3 attributes, based on the 22 Oct 2002 draft

    def setIdAttribute(self, name):
        idAttr = self.getAttributeNode(name)
        self.setIdAttributeNode(idAttr)

    def setIdAttributeNS(self, namespaceURI, localName):
        idAttr = self.getAttributeNodeNS(namespaceURI, localName)
        self.setIdAttributeNode(idAttr)

    def setIdAttributeNode(self, idAttr):
        """Mark *idAttr* (which must belong to this element) as an ID."""
        if idAttr is None or not self.isSameNode(idAttr.ownerElement):
            raise xml.dom.NotFoundErr()
        if _get_containing_entref(self) is not None:
            # Nodes inside entity references are read-only per the DOM.
            raise xml.dom.NoModificationAllowedErr()
        if not idAttr._is_id:
            idAttr._is_id = True
            self._magic_id_nodes += 1
            self.ownerDocument._magic_id_count += 1
            _clear_id_cache(self)
# Read-only properties backed by Element's accessors.
defproperty(Element, "attributes",
            doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
            doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
    # Install *attr* into both of *element*'s attribute dicts without
    # any duplicate/ownership checks (callers do those).
    _clear_id_cache(element)
    element._ensure_attributes()
    element._attrs[attr.name] = attr
    element._attrsNS[(attr.namespaceURI, attr.localName)] = attr

    # This creates a circular reference, but Element.unlink()
    # breaks the cycle since the references to the attribute
    # dictionaries are tossed.
    attr.ownerElement = element
class Childless:
    """Mixin that makes childless-ness easy to implement and avoids
    the complexity of the Node methods that deal with children.
    """
    __slots__ = ()

    attributes = None
    childNodes = EmptyNodeList()
    firstChild = None
    lastChild = None

    def _get_firstChild(self):
        return None

    def _get_lastChild(self):
        return None

    def appendChild(self, node):
        # Always an error: these node types can never have children.
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes cannot have children")

    def hasChildNodes(self):
        return False

    def insertBefore(self, newChild, refChild):
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes do not have children")

    def removeChild(self, oldChild):
        raise xml.dom.NotFoundErr(
            self.nodeName + " nodes do not have children")

    def normalize(self):
        # For childless nodes, normalize() has nothing to do.
        pass

    def replaceChild(self, newChild, oldChild):
        raise xml.dom.HierarchyRequestErr(
            self.nodeName + " nodes do not have children")
class ProcessingInstruction(Childless, Node):
    """DOM ProcessingInstruction node, serialized as <?target data?>."""
    nodeType = Node.PROCESSING_INSTRUCTION_NODE
    __slots__ = ('target', 'data')

    def __init__(self, target, data):
        self.target = target
        self.data = data

    # nodeValue is an alias for data
    def _get_nodeValue(self):
        return self.data
    def _set_nodeValue(self, value):
        # BUG FIX: the original assigned the undefined name `data`
        # (raising NameError whenever nodeValue was set) instead of the
        # setter's argument; matches the fix made in CPython's minidom.
        self.data = value
    nodeValue = property(_get_nodeValue, _set_nodeValue)

    # nodeName is an alias for target
    def _get_nodeName(self):
        return self.target
    def _set_nodeName(self, value):
        self.target = value
    nodeName = property(_get_nodeName, _set_nodeName)

    def writexml(self, writer, indent="", addindent="", newl=""):
        writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Childless, Node):
    """Base class for Text/Comment/CDATA nodes.

    Wraps a mutable string exposed as .data (aliased to .nodeValue)
    and provides the DOM string-manipulation operations.
    """
    __slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling')

    def __init__(self):
        self.ownerDocument = self.parentNode = None
        self.previousSibling = self.nextSibling = None
        self._data = ''
        Node.__init__(self)

    def _get_length(self):
        return len(self.data)
    __len__ = _get_length

    def _get_data(self):
        return self._data
    def _set_data(self, data):
        self._data = data

    data = nodeValue = property(_get_data, _set_data)

    def __repr__(self):
        # Truncate long data to the first 10 characters with an ellipsis.
        data = self.data
        if len(data) > 10:
            dotdotdot = "..."
        else:
            dotdotdot = ""
        return '<DOM %s node "%r%s">' % (
            self.__class__.__name__, data[0:10], dotdotdot)

    def substringData(self, offset, count):
        """Return up to *count* characters starting at *offset*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        return self.data[offset:offset+count]

    def appendData(self, arg):
        self.data = self.data + arg

    def insertData(self, offset, arg):
        """Insert *arg* into the data at *offset*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if arg:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset:])

    def deleteData(self, offset, count):
        """Remove *count* characters starting at *offset*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = self.data[:offset] + self.data[offset+count:]

    def replaceData(self, offset, count, arg):
        """Replace *count* characters starting at *offset* with *arg*."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset+count:])
# Expose the _get_length accessor as a read-only "length" property.
defproperty(CharacterData, "length", doc="Length of the string data.")
class Text(CharacterData):
    """DOM Text node."""
    __slots__ = ()

    nodeType = Node.TEXT_NODE
    nodeName = "#text"
    attributes = None

    def splitText(self, offset):
        """Split this node at *offset*; the tail becomes a new Text node
        inserted as the next sibling, and is returned."""
        if offset < 0 or offset > len(self.data):
            raise xml.dom.IndexSizeErr("illegal offset value")
        newText = self.__class__()
        newText.data = self.data[offset:]
        newText.ownerDocument = self.ownerDocument
        next = self.nextSibling
        if self.parentNode and self in self.parentNode.childNodes:
            if next is None:
                self.parentNode.appendChild(newText)
            else:
                self.parentNode.insertBefore(newText, next)
        self.data = self.data[:offset]
        return newText

    def writexml(self, writer, indent="", addindent="", newl=""):
        # Text content is escaped via _write_data.
        _write_data(writer, "%s%s%s" % (indent, self.data, newl))

    # DOM Level 3 (WD 9 April 2002)

    def _get_wholeText(self):
        # Concatenate this node's data with all logically adjacent
        # Text/CDATA siblings on both sides.
        L = [self.data]
        n = self.previousSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                L.insert(0, n.data)
                n = n.previousSibling
            else:
                break
        n = self.nextSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                L.append(n.data)
                n = n.nextSibling
            else:
                break
        return ''.join(L)

    def replaceWholeText(self, content):
        # XXX This needs to be seriously changed if minidom ever
        # supports EntityReference nodes.
        parent = self.parentNode
        n = self.previousSibling
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                next = n.previousSibling
                parent.removeChild(n)
                n = next
            else:
                break
        n = self.nextSibling
        if not content:
            # Empty replacement removes this node too; returns None below.
            parent.removeChild(self)
        while n is not None:
            if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
                next = n.nextSibling
                parent.removeChild(n)
                n = next
            else:
                break
        if content:
            self.data = content
            return self
        else:
            return None

    def _get_isWhitespaceInElementContent(self):
        # True only when whitespace-only AND the schema says the parent
        # element has element-only content.
        if self.data.strip():
            return False
        elem = _get_containing_element(self)
        if elem is None:
            return False
        info = self.ownerDocument._get_elem_info(elem)
        if info is None:
            return False
        else:
            return info.isElementContent()
# Read-only DOM Level 3 properties backed by Text's accessors.
defproperty(Text, "isWhitespaceInElementContent",
            doc="True iff this text node contains only whitespace"
                " and is in element content.")
defproperty(Text, "wholeText",
            doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
    """Return the nearest Element ancestor of *node*, or None."""
    ancestor = node.parentNode
    while ancestor is not None:
        if ancestor.nodeType == Node.ELEMENT_NODE:
            return ancestor
        ancestor = ancestor.parentNode
    return None
def _get_containing_entref(node):
    """Return the nearest EntityReference ancestor of *node*, or None."""
    ancestor = node.parentNode
    while ancestor is not None:
        if ancestor.nodeType == Node.ENTITY_REFERENCE_NODE:
            return ancestor
        ancestor = ancestor.parentNode
    return None
class Comment(CharacterData):
    """DOM Comment node, serialized as <!--data-->."""
    nodeType = Node.COMMENT_NODE
    nodeName = "#comment"

    def __init__(self, data):
        CharacterData.__init__(self)
        self._data = data

    def writexml(self, writer, indent="", addindent="", newl=""):
        # XML forbids "--" inside comments, so serialization must fail.
        if "--" in self.data:
            raise ValueError("'--' is not allowed in a comment node")
        writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
    """DOM CDATASection node; content is serialized without escaping."""
    __slots__ = ()

    nodeType = Node.CDATA_SECTION_NODE
    nodeName = "#cdata-section"

    def writexml(self, writer, indent="", addindent="", newl=""):
        # "]]>" would terminate the section early and cannot be emitted.
        if self.data.find("]]>") >= 0:
            raise ValueError("']]>' not allowed in a CDATA section")
        writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
    """Immutable NamedNodeMap backed by a plain sequence.

    Used for DocumentType.entities and .notations, which the DOM
    defines as read-only; all mutators raise
    NoModificationAllowedErr.  Lookups are linear scans.
    """

    __slots__ = '_seq',

    def __init__(self, seq=()):
        # seq should be a list or tuple
        self._seq = seq

    def __len__(self):
        return len(self._seq)

    def _get_length(self):
        return len(self._seq)

    def getNamedItem(self, name):
        # Linear search by nodeName; returns None when absent.
        for candidate in self._seq:
            if candidate.nodeName == name:
                return candidate

    def getNamedItemNS(self, namespaceURI, localName):
        for candidate in self._seq:
            if (candidate.namespaceURI == namespaceURI
                    and candidate.localName == localName):
                return candidate

    def __getitem__(self, name_or_tuple):
        # Tuple keys mean (namespaceURI, localName); others are names.
        if isinstance(name_or_tuple, tuple):
            found = self.getNamedItemNS(*name_or_tuple)
        else:
            found = self.getNamedItem(name_or_tuple)
        if found is None:
            raise KeyError(name_or_tuple)
        return found

    def item(self, index):
        # DOM item(): any out-of-range index (negative included) is None.
        if index < 0:
            return None
        try:
            return self._seq[index]
        except IndexError:
            return None

    def removeNamedItem(self, name):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def removeNamedItemNS(self, namespaceURI, localName):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def setNamedItem(self, node):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def setNamedItemNS(self, node):
        raise xml.dom.NoModificationAllowedErr(
            "NamedNodeMap instance is read-only")

    def __getstate__(self):
        return [self._seq]

    def __setstate__(self, state):
        self._seq = state[0]
# Read-only "length" property for the read-only map.
defproperty(ReadOnlySequentialNamedNodeMap, "length",
            doc="Number of entries in the NamedNodeMap.")
class Identified:
    """Mix-in class that supports the publicId and systemId attributes."""

    __slots__ = 'publicId', 'systemId'

    def _identified_mixin_init(self, publicId, systemId):
        # Called from subclass constructors; not part of the DOM API.
        self.publicId = publicId
        self.systemId = systemId

    def _get_publicId(self):
        return self.publicId

    def _get_systemId(self):
        return self.systemId
class DocumentType(Identified, Childless, Node):
    """DOM DocumentType node (the <!DOCTYPE ...> declaration)."""
    nodeType = Node.DOCUMENT_TYPE_NODE
    nodeValue = None
    name = None
    publicId = None
    systemId = None
    internalSubset = None

    def __init__(self, qualifiedName):
        self.entities = ReadOnlySequentialNamedNodeMap()
        self.notations = ReadOnlySequentialNamedNodeMap()
        if qualifiedName:
            prefix, localname = _nssplit(qualifiedName)
            self.name = localname
        self.nodeName = self.name

    def _get_internalSubset(self):
        return self.internalSubset

    def cloneNode(self, deep):
        """Clone this doctype.

        Only legal while unowned (ownerDocument is None); otherwise
        returns None per the DOM spec.  With deep=True the entities
        and notations are copied and user-data handlers notified.
        """
        if self.ownerDocument is None:
            # it's ok
            clone = DocumentType(None)
            clone.name = self.name
            clone.nodeName = self.name
            operation = xml.dom.UserDataHandler.NODE_CLONED
            if deep:
                clone.entities._seq = []
                clone.notations._seq = []
                for n in self.notations._seq:
                    notation = Notation(n.nodeName, n.publicId, n.systemId)
                    clone.notations._seq.append(notation)
                    n._call_user_data_handler(operation, n, notation)
                for e in self.entities._seq:
                    entity = Entity(e.nodeName, e.publicId, e.systemId,
                                    e.notationName)
                    entity.actualEncoding = e.actualEncoding
                    entity.encoding = e.encoding
                    entity.version = e.version
                    clone.entities._seq.append(entity)
                    # BUG FIX: the source node passed to the handler is
                    # the entity being cloned (e), not the leftover loop
                    # variable `n` from the notations loop above (which
                    # was the wrong node, and an unbound NameError when
                    # there were no notations at all).
                    e._call_user_data_handler(operation, e, entity)
            self._call_user_data_handler(operation, self, clone)
            return clone
        else:
            return None

    def writexml(self, writer, indent="", addindent="", newl=""):
        writer.write("<!DOCTYPE ")
        writer.write(self.name)
        if self.publicId:
            writer.write("%s PUBLIC '%s'%s '%s'"
                         % (newl, self.publicId, newl, self.systemId))
        elif self.systemId:
            writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
        if self.internalSubset is not None:
            writer.write(" [")
            writer.write(self.internalSubset)
            writer.write("]")
        writer.write(">"+newl)
class Entity(Identified, Node):
    """DOM Entity node.  Effectively childless from the caller's point
    of view: every child-mutation method raises HierarchyRequestErr."""
    attributes = None
    nodeType = Node.ENTITY_NODE
    nodeValue = None
    # DOM Level 3 encoding/version attributes, filled in by the builder.
    actualEncoding = None
    encoding = None
    version = None
    def __init__(self, name, publicId, systemId, notation):
        self.nodeName = name
        self.notationName = notation
        self.childNodes = NodeList()
        self._identified_mixin_init(publicId, systemId)
    def _get_actualEncoding(self):
        return self.actualEncoding
    def _get_encoding(self):
        return self.encoding
    def _get_version(self):
        return self.version
    def appendChild(self, newChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot append children to an entity node")
    def insertBefore(self, newChild, refChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot insert children below an entity node")
    def removeChild(self, oldChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot remove children from an entity node")
    def replaceChild(self, newChild, oldChild):
        raise xml.dom.HierarchyRequestErr(
            "cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
    """DOM Notation node: a name paired with public/system identifiers."""
    nodeType = Node.NOTATION_NODE
    nodeValue = None
    def __init__(self, name, publicId, systemId):
        self.nodeName = name
        self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
    """Factory for Document and DocumentType nodes; also answers the
    DOM hasFeature() capability query."""
    # (feature, version) pairs reported as supported; a None version
    # matches a query for "any version" of that feature.
    _features = [("core", "1.0"),
                 ("core", "2.0"),
                 ("core", None),
                 ("xml", "1.0"),
                 ("xml", "2.0"),
                 ("xml", None),
                 ("ls-load", "3.0"),
                 ("ls-load", None),
                 ]
    def hasFeature(self, feature, version):
        # An empty version string means "any version"; features compare
        # case-insensitively.
        if version == "":
            version = None
        return (feature.lower(), version) in self._features
    def createDocument(self, namespaceURI, qualifiedName, doctype):
        if doctype and doctype.parentNode is not None:
            raise xml.dom.WrongDocumentErr(
                "doctype object owned by another DOM tree")
        doc = self._create_document()
        # A root element is created unless all three arguments are None.
        add_root_element = not (namespaceURI is None
                                and qualifiedName is None
                                and doctype is None)
        if not qualifiedName and add_root_element:
            # The spec is unclear what to raise here; SyntaxErr
            # would be the other obvious candidate. Since Xerces raises
            # InvalidCharacterErr, and since SyntaxErr is not listed
            # for createDocument, that seems to be the better choice.
            # XXX: need to check for illegal characters here and in
            # createElement.
            # DOM Level III clears this up when talking about the return value
            # of this function. If namespaceURI, qName and DocType are
            # Null the document is returned without a document element
            # Otherwise if doctype or namespaceURI are not None
            # Then we go back to the above problem
            raise xml.dom.InvalidCharacterErr("Element with no name")
        if add_root_element:
            prefix, localname = _nssplit(qualifiedName)
            if prefix == "xml" \
               and namespaceURI != "http://www.w3.org/XML/1998/namespace":
                raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
            if prefix and not namespaceURI:
                raise xml.dom.NamespaceErr(
                    "illegal use of prefix without namespaces")
            element = doc.createElementNS(namespaceURI, qualifiedName)
            if doctype:
                doc.appendChild(doctype)
            doc.appendChild(element)
        if doctype:
            doctype.parentNode = doctype.ownerDocument = doc
            doc.doctype = doctype
        doc.implementation = self
        return doc
    def createDocumentType(self, qualifiedName, publicId, systemId):
        doctype = DocumentType(qualifiedName)
        doctype.publicId = publicId
        doctype.systemId = systemId
        return doctype
    # DOM Level 3 (WD 9 April 2002)
    def getInterface(self, feature):
        # Single-object implementation: the implementation itself serves
        # every feature it supports.
        if self.hasFeature(feature, None):
            return self
        else:
            return None
    # internal
    def _create_document(self):
        return Document()
class ElementInfo(object):
    """Content-model information for a single element.

    This default implementation reports "no DTD information": every
    attribute is untyped and nothing is an ID.  DOM builders are
    expected to substitute richer implementations.
    """

    __slots__ = ('tagName',)

    def __init__(self, name):
        self.tagName = name

    def getAttributeType(self, aname):
        # No declaration information available -> untyped.
        return _no_type

    def getAttributeTypeNS(self, namespaceURI, localName):
        return _no_type

    def isElementContent(self):
        return False

    def isEmpty(self):
        """Returns true iff this element is declared to have an EMPTY
        content model."""
        return False

    def isId(self, aname):
        """Returns true iff the named attribute is a DTD-style ID."""
        return False

    def isIdNS(self, namespaceURI, localName):
        """Returns true iff the identified attribute is a DTD-style ID."""
        return False

    # Pickle the tag name only; there is no other state.
    def __getstate__(self):
        return self.tagName

    def __setstate__(self, state):
        self.tagName = state
def _clear_id_cache(node):
    # Invalidate the owning document's getElementById() cache (and the
    # in-progress search stack) after a mutation that may have changed
    # which elements carry ID attributes.
    if node.nodeType == Node.DOCUMENT_NODE:
        node._id_cache.clear()
        node._id_search_stack = None
    elif _in_document(node):
        node.ownerDocument._id_cache.clear()
        node.ownerDocument._id_search_stack= None
class Document(Node, DocumentLS):
    """DOM Document node: the root of a minidom tree.

    Owns the node-factory methods (createElement & friends), the
    ID-attribute cache used by getElementById(), and the registry of
    per-element content-model info (_elem_info).
    """
    __slots__ = ('_elem_info', 'doctype',
                 '_id_search_stack', 'childNodes', '_id_cache')
    # Node types allowed directly under a Document.
    _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
                         Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
    implementation = DOMImplementation()
    nodeType = Node.DOCUMENT_NODE
    nodeName = "#document"
    nodeValue = None
    attributes = None
    parentNode = None
    previousSibling = nextSibling = None
    # Document attributes from Level 3 (WD 9 April 2002)
    actualEncoding = None
    encoding = None
    standalone = None
    version = None
    strictErrorChecking = False
    errorHandler = None
    documentURI = None
    # Count of attributes flagged as IDs via setIdAttribute*() rather
    # than via DTD information.
    _magic_id_count = 0

    def __init__(self):
        self.doctype = None
        self.childNodes = NodeList()
        # mapping of (namespaceURI, localName) -> ElementInfo
        # and tagName -> ElementInfo
        self._elem_info = {}
        # id value -> Element, filled lazily by getElementById().
        self._id_cache = {}
        self._id_search_stack = None

    def _get_elem_info(self, element):
        # Content-model info is keyed by (namespaceURI, localName) for
        # namespaced elements, by plain tagName otherwise.
        if element.namespaceURI:
            key = element.namespaceURI, element.localName
        else:
            key = element.tagName
        return self._elem_info.get(key)

    def _get_actualEncoding(self):
        return self.actualEncoding

    def _get_doctype(self):
        return self.doctype

    def _get_documentURI(self):
        return self.documentURI

    def _get_encoding(self):
        return self.encoding

    def _get_errorHandler(self):
        return self.errorHandler

    def _get_standalone(self):
        return self.standalone

    def _get_strictErrorChecking(self):
        return self.strictErrorChecking

    def _get_version(self):
        return self.version

    def appendChild(self, node):
        if node.nodeType not in self._child_node_types:
            raise xml.dom.HierarchyRequestErr(
                "%s cannot be child of %s" % (repr(node), repr(self)))
        if node.parentNode is not None:
            # This needs to be done before the next test since this
            # may *be* the document element, in which case it should
            # end up re-ordered to the end.
            node.parentNode.removeChild(node)
        if node.nodeType == Node.ELEMENT_NODE \
           and self._get_documentElement():
            raise xml.dom.HierarchyRequestErr(
                "two document elements disallowed")
        return Node.appendChild(self, node)

    def removeChild(self, oldChild):
        try:
            self.childNodes.remove(oldChild)
        except ValueError:
            raise xml.dom.NotFoundErr()
        oldChild.nextSibling = oldChild.previousSibling = None
        oldChild.parentNode = None
        # NOTE(review): oldChild has just been removed from childNodes,
        # so the documentElement property (first remaining ELEMENT_NODE
        # child) can no longer be oldChild -- this branch looks
        # unreachable; confirm before relying on it.
        if self.documentElement is oldChild:
            self.documentElement = None
        return oldChild

    def _get_documentElement(self):
        # First element child, or None (implicit) when there is none.
        for node in self.childNodes:
            if node.nodeType == Node.ELEMENT_NODE:
                return node

    def unlink(self):
        # Break reference cycles so the tree can be collected promptly.
        if self.doctype is not None:
            self.doctype.unlink()
            self.doctype = None
        Node.unlink(self)

    def cloneNode(self, deep):
        # Shallow document clones are not supported: a Document with no
        # children is meaningless, so return None.
        if not deep:
            return None
        clone = self.implementation.createDocument(None, None, None)
        clone.encoding = self.encoding
        clone.standalone = self.standalone
        clone.version = self.version
        for n in self.childNodes:
            childclone = _clone_node(n, deep, clone)
            assert childclone.ownerDocument.isSameNode(clone)
            clone.childNodes.append(childclone)
            if childclone.nodeType == Node.DOCUMENT_NODE:
                assert clone.documentElement is None
            elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
                assert clone.doctype is None
                clone.doctype = childclone
            childclone.parentNode = clone
        self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
                                     self, clone)
        return clone

    def createDocumentFragment(self):
        d = DocumentFragment()
        d.ownerDocument = self
        return d

    def createElement(self, tagName):
        e = Element(tagName)
        e.ownerDocument = self
        return e

    def createTextNode(self, data):
        if not isinstance(data, str):
            raise TypeError("node contents must be a string")
        t = Text()
        t.data = data
        t.ownerDocument = self
        return t

    def createCDATASection(self, data):
        if not isinstance(data, str):
            raise TypeError("node contents must be a string")
        c = CDATASection()
        c.data = data
        c.ownerDocument = self
        return c

    def createComment(self, data):
        c = Comment(data)
        c.ownerDocument = self
        return c

    def createProcessingInstruction(self, target, data):
        p = ProcessingInstruction(target, data)
        p.ownerDocument = self
        return p

    def createAttribute(self, qName):
        a = Attr(qName)
        a.ownerDocument = self
        a.value = ""
        return a

    def createElementNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        e = Element(qualifiedName, namespaceURI, prefix)
        e.ownerDocument = self
        return e

    def createAttributeNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        a = Attr(qualifiedName, namespaceURI, localName, prefix)
        a.ownerDocument = self
        a.value = ""
        return a

    # A couple of implementation-specific helpers to create node types
    # not supported by the W3C DOM specs:

    def _create_entity(self, name, publicId, systemId, notationName):
        e = Entity(name, publicId, systemId, notationName)
        e.ownerDocument = self
        return e

    def _create_notation(self, name, publicId, systemId):
        n = Notation(name, publicId, systemId)
        n.ownerDocument = self
        return n

    def getElementById(self, id):
        # Incremental depth-first search: results found along the way
        # are cached in _id_cache, and the pending stack is kept in
        # _id_search_stack so a later lookup resumes where this one
        # stopped (the cache is invalidated by _clear_id_cache()).
        if id in self._id_cache:
            return self._id_cache[id]
        if not (self._elem_info or self._magic_id_count):
            return None
        stack = self._id_search_stack
        if stack is None:
            # we never searched before, or the cache has been cleared
            stack = [self.documentElement]
            self._id_search_stack = stack
        elif not stack:
            # Previous search was completed and cache is still valid;
            # no matching node.
            return None
        result = None
        while stack:
            node = stack.pop()
            # add child elements to stack for continued searching
            stack.extend([child for child in node.childNodes
                          if child.nodeType in _nodeTypes_with_children])
            # check this node
            info = self._get_elem_info(node)
            if info:
                # We have to process all ID attributes before
                # returning in order to get all the attributes set to
                # be IDs using Element.setIdAttribute*().
                for attr in node.attributes.values():
                    if attr.namespaceURI:
                        if info.isIdNS(attr.namespaceURI, attr.localName):
                            self._id_cache[attr.value] = node
                            if attr.value == id:
                                result = node
                            elif not node._magic_id_nodes:
                                break
                    elif info.isId(attr.name):
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
                        elif not node._magic_id_nodes:
                            break
                    elif attr._is_id:
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
                        elif node._magic_id_nodes == 1:
                            break
            elif node._magic_id_nodes:
                for attr in node.attributes.values():
                    if attr._is_id:
                        self._id_cache[attr.value] = node
                        if attr.value == id:
                            result = node
            if result is not None:
                break
        return result

    def getElementsByTagName(self, name):
        return _get_elements_by_tagName_helper(self, name, NodeList())

    def getElementsByTagNameNS(self, namespaceURI, localName):
        return _get_elements_by_tagName_ns_helper(
            self, namespaceURI, localName, NodeList())

    def isSupported(self, feature, version):
        return self.implementation.hasFeature(feature, version)

    def importNode(self, node, deep):
        if node.nodeType == Node.DOCUMENT_NODE:
            raise xml.dom.NotSupportedErr("cannot import document nodes")
        elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
            raise xml.dom.NotSupportedErr("cannot import document type nodes")
        return _clone_node(node, deep, self)

    def writexml(self, writer, indent="", addindent="", newl="", encoding=None):
        if encoding is None:
            writer.write('<?xml version="1.0" ?>'+newl)
        else:
            writer.write('<?xml version="1.0" encoding="%s"?>%s' % (
                encoding, newl))
        for node in self.childNodes:
            node.writexml(writer, indent, addindent, newl)

    # DOM Level 3 (WD 9 April 2002)
    def renameNode(self, n, namespaceURI, name):
        if n.ownerDocument is not self:
            raise xml.dom.WrongDocumentErr(
                "cannot rename nodes from other documents;\n"
                "expected %s,\nfound %s" % (self, n.ownerDocument))
        if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
            raise xml.dom.NotSupportedErr(
                "renameNode() only applies to element and attribute nodes")
        if namespaceURI != EMPTY_NAMESPACE:
            if ':' in name:
                prefix, localName = name.split(':', 1)
                if ( prefix == "xmlns"
                     and namespaceURI != xml.dom.XMLNS_NAMESPACE):
                    raise xml.dom.NamespaceErr(
                        "illegal use of 'xmlns' prefix")
            else:
                if ( name == "xmlns"
                     and namespaceURI != xml.dom.XMLNS_NAMESPACE
                     and n.nodeType == Node.ATTRIBUTE_NODE):
                    raise xml.dom.NamespaceErr(
                        "illegal use of the 'xmlns' attribute")
                prefix = None
                localName = name
        else:
            prefix = None
            localName = None
        if n.nodeType == Node.ATTRIBUTE_NODE:
            # Detach the attribute while renaming; it is re-attached below.
            element = n.ownerElement
            if element is not None:
                is_id = n._is_id
                element.removeAttributeNode(n)
        else:
            element = None
        n.prefix = prefix
        n._localName = localName
        n.namespaceURI = namespaceURI
        n.nodeName = name
        if n.nodeType == Node.ELEMENT_NODE:
            n.tagName = name
        else:
            # attribute node
            n.name = name
            if element is not None:
                element.setAttributeNode(n)
                if is_id:
                    element.setIdAttributeNode(n)
        # It's not clear from a semantic perspective whether we should
        # call the user data handlers for the NODE_RENAMED event since
        # we're re-using the existing node. The draft spec has been
        # interpreted as meaning "no, don't call the handler unless a
        # new node is created."
        return n
# Install the read-only "documentElement" property (backed by
# Document._get_documentElement above).
defproperty(Document, "documentElement",
            doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
    """
    Clone a node and give it the new owner document.
    Called by Node.cloneNode and Document.importNode
    """
    # Same document -> a clone; different document -> an import.  The
    # distinction only matters for the user-data handler callbacks.
    if node.ownerDocument.isSameNode(newOwnerDocument):
        operation = xml.dom.UserDataHandler.NODE_CLONED
    else:
        operation = xml.dom.UserDataHandler.NODE_IMPORTED
    if node.nodeType == Node.ELEMENT_NODE:
        clone = newOwnerDocument.createElementNS(node.namespaceURI,
                                                 node.nodeName)
        for attr in node.attributes.values():
            clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
            a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
            a.specified = attr.specified
        if deep:
            for child in node.childNodes:
                c = _clone_node(child, deep, newOwnerDocument)
                clone.appendChild(c)
    elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
        clone = newOwnerDocument.createDocumentFragment()
        if deep:
            for child in node.childNodes:
                c = _clone_node(child, deep, newOwnerDocument)
                clone.appendChild(c)
    elif node.nodeType == Node.TEXT_NODE:
        clone = newOwnerDocument.createTextNode(node.data)
    elif node.nodeType == Node.CDATA_SECTION_NODE:
        clone = newOwnerDocument.createCDATASection(node.data)
    elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
        clone = newOwnerDocument.createProcessingInstruction(node.target,
                                                             node.data)
    elif node.nodeType == Node.COMMENT_NODE:
        clone = newOwnerDocument.createComment(node.data)
    elif node.nodeType == Node.ATTRIBUTE_NODE:
        clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
                                                   node.nodeName)
        clone.specified = True
        clone.value = node.value
    elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
        assert node.ownerDocument is not newOwnerDocument
        operation = xml.dom.UserDataHandler.NODE_IMPORTED
        clone = newOwnerDocument.implementation.createDocumentType(
            node.name, node.publicId, node.systemId)
        clone.ownerDocument = newOwnerDocument
        if deep:
            clone.entities._seq = []
            clone.notations._seq = []
            for n in node.notations._seq:
                notation = Notation(n.nodeName, n.publicId, n.systemId)
                notation.ownerDocument = newOwnerDocument
                clone.notations._seq.append(notation)
                if hasattr(n, '_call_user_data_handler'):
                    n._call_user_data_handler(operation, n, notation)
            for e in node.entities._seq:
                entity = Entity(e.nodeName, e.publicId, e.systemId,
                                e.notationName)
                entity.actualEncoding = e.actualEncoding
                entity.encoding = e.encoding
                entity.version = e.version
                entity.ownerDocument = newOwnerDocument
                clone.entities._seq.append(entity)
                if hasattr(e, '_call_user_data_handler'):
                    # Bug fix: the handler's source node is the entity being
                    # cloned ('e'), not 'n' -- the old code passed the last
                    # notation, and raised NameError when notations was empty.
                    e._call_user_data_handler(operation, e, entity)
    else:
        # Note the cloning of Document and DocumentType nodes is
        # implementation specific. minidom handles those cases
        # directly in the cloneNode() methods.
        raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
    # Check for _call_user_data_handler() since this could conceivably
    # used with other DOM implementations (one of the FourThought
    # DOMs, perhaps?).
    if hasattr(node, '_call_user_data_handler'):
        node._call_user_data_handler(operation, node, clone)
    return clone
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
def _do_pulldom_parse(func, args, kwargs):
    # Run a pulldom event stream just far enough to obtain the root
    # node, expand it fully into a tree, then release parser resources.
    events = func(*args, **kwargs)
    toktype, rootNode = events.getEvent()
    events.expandNode(rootNode)
    events.clear()
    return rootNode
def parse(file, parser=None, bufsize=None):
    """Parse a file into a DOM by filename or file object."""
    if parser is None and not bufsize:
        # Fast path: use the expat builder when no custom parser or
        # buffer size was requested.
        from xml.dom import expatbuilder
        return expatbuilder.parse(file)
    else:
        # Otherwise go through pulldom, which accepts both options.
        from xml.dom import pulldom
        return _do_pulldom_parse(pulldom.parse, (file,),
                                 {'parser': parser, 'bufsize': bufsize})
def parseString(string, parser=None):
    """Parse a string into a DOM document."""
    if parser is None:
        # Fast path: use the expat builder when no custom parser is given.
        from xml.dom import expatbuilder
        return expatbuilder.parseString(string)
    else:
        from xml.dom import pulldom
        return _do_pulldom_parse(pulldom.parseString, (string,),
                                 {'parser': parser})
def getDOMImplementation(features=None):
    # Return this module's singleton DOMImplementation, or None when it
    # does not support every requested (feature, version) pair.
    if features:
        if isinstance(features, str):
            # Feature strings like "core 2.0" are parsed by xml.dom.domreg.
            features = domreg._parse_feature_string(features)
        for f, v in features:
            if not Document.implementation.hasFeature(f, v):
                return None
    return Document.implementation
|
codepython/restcommander | refs/heads/master | play-1.2.4/python/Lib/site-packages/readline.py | 4 | # -*- coding: UTF-8 -*-
#this file is needed in site-packages to emulate readline
#necessary for rlcompleter since it relies on the existence
#of a readline module
from pyreadline import *
|
Hazelsuko07/17WarmingUp | refs/heads/hy_try | py3.6/lib/python3.6/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py | 713 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
# Field order of the Url namedtuple, mirroring RFC 3986 URI components.
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']


class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # Bug fix: this must be spelled __slots__ to actually suppress the
    # per-instance __dict__; the previous "slots = ()" was an inert class
    # attribute with no effect.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalize a relative path ("index.html") to an absolute one.
        if path and not path.startswith('/'):
            path = '/' + path
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ... '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        url = ''

        # We use "is not None" because we want things to happen with empty
        # strings (or 0 port).
        if scheme is not None:
            url += scheme + '://'
        if auth is not None:
            url += auth + '@'
        if host is not None:
            url += host
        if port is not None:
            url += ':' + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += '?' + query
        if fragment is not None:
            url += '#' + fragment

        return url

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best_idx = None
    best_delim = None
    for delim in delims:
        found = s.find(delim)
        if found < 0:
            continue
        if best_idx is None or found < best_idx:
            best_idx, best_delim = found, delim

    if best_idx is None:
        return s, '', None

    return s[:best_idx], s[best_idx + 1:], best_delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
    # on CPython.
    if not url:
        # Empty
        return Url()
    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None
    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)
    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])
    if delim:
        # Reassemble the path
        path = delim + path_
    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)
    # IPv6
    if url and url[0] == '[':
        # Bracketed literal: keep the brackets as part of the host.
        host, url = url.split(']', 1)
        host += ']'
    # Port
    if ':' in url:
        _host, port = url.split(':', 1)
        if not host:
            # Not set by the IPv6 branch above.
            host = _host
        if port:
            # If given, ports must be integers.
            if not port.isdigit():
                raise LocationParseError(url)
            port = int(port)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None
    elif not host and url:
        host = url
    if not path:
        return Url(scheme, auth, host, port, path, query, fragment)
    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)
    # Query
    if '?' in path:
        path, query = path.split('?', 1)
    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
|
coders-circle/Notifica | refs/heads/master | web/routine/admin.py | 1 | from django.contrib import admin
from routine.models import *
class PeriodInline(admin.StackedInline):
    # Edit Period rows inline on their parent's change form, with 5
    # extra blank forms shown.
    model = Period
    extra = 5
class RoutineAdmin(admin.ModelAdmin):
    # Allow editing a routine's periods directly on the routine page.
    inlines = [PeriodInline]
# Register the routine models with the Django admin site.
admin.site.register(Routine, RoutineAdmin)
admin.site.register(Period)
|
catapult-project/catapult | refs/heads/master | third_party/google-endpoints/httplib2/socks.py | 811 | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import base64
import socket
import struct
import sys
# socket.socket can be missing on platforms built without networking;
# the whole module is unusable in that case.
if getattr(socket, 'socket', None) is None:
    raise ImportError('socket.socket missing, proxy support unusable')

# Supported proxy protocols (stored as element 0 of the proxy tuple).
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4

# Module-wide default proxy tuple (set via setdefaultproxy) and the real
# socket class, saved before any wrapmodule() monkey-patching.
_defaultproxy = None
_orgsocket = socket.socket

# Exception hierarchy: every error raised by this module derives from
# ProxyError and carries a (code, message) tuple.
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass

# Human-readable messages indexed by the numeric codes used in the
# (code, message) tuples above.
_generalerrors = ("success",
    "invalid data",
    "not connected",
    "not available",
    "bad proxy type",
    "bad input")

_socks5errors = ("succeeded",
    "general SOCKS server failure",
    "connection not allowed by ruleset",
    "Network unreachable",
    "Host unreachable",
    "Connection refused",
    "TTL expired",
    "Command not supported",
    "Address type not supported",
    "Unknown error")

_socks5autherrors = ("succeeded",
    "authentication is required",
    "all offered authentication methods were rejected",
    "unknown username or invalid password",
    "unknown error")

_socks4errors = ("request granted",
    "request rejected or failed",
    "request rejected because SOCKS server cannot connect to identd on the client",
    "request rejected because the client program and identd report different user-ids",
    "unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed.
    """
    global _defaultproxy
    # Same 6-tuple layout that socksocket.setproxy() stores.
    _defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
    """wrapmodule(module)

    Attempts to replace a module's socket library with a SOCKS socket.
    Must set a default proxy using setdefaultproxy(...) first.  This
    will only work on modules that import socket directly into the
    namespace; most of the Python Standard Library falls into this
    category.

    Raises GeneralProxyError when no default proxy has been configured.
    """
    # Idiom fix: compare against None with "is" (PEP 8), and fail fast
    # before touching the target module.
    if _defaultproxy is None:
        raise GeneralProxyError((4, "no proxy specified"))
    module.socket.socket = socksocket
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
    # Initialize the underlying socket, then adopt the module-wide
    # default proxy (if one was set via setdefaultproxy()).
    _orgsocket.__init__(self, family, type, proto, _sock)
    if _defaultproxy != None:
        self.__proxy = _defaultproxy
    else:
        # (proxytype, addr, port, rdns, username, password) -- all unset.
        self.__proxy = (None, None, None, None, None, None)
    self.__proxysockname = None
    self.__proxypeername = None
    # NOTE(review): starts True, so sendall() does not rewrite headers
    # until something flips this flag during HTTP non-tunnel
    # negotiation -- confirm against the (not shown) negotiate code.
    self.__httptunnel = True
def __recvall(self, count):
    """__recvall(count) -> data
    Receive EXACTLY the number of bytes requested from the socket.
    Blocks until the required number of bytes have been received.
    """
    buf = self.recv(count)
    while len(buf) < count:
        more = self.recv(count - len(buf))
        if not more:
            raise GeneralProxyError((0, "connection closed unexpectedly"))
        buf = buf + more
    return buf
def sendall(self, content, *args):
    """ override socket.socket.sendall method to rewrite the header
    for non-tunneling proxies if needed
    """
    # Only rewrite when HTTP non-tunnel mode has been negotiated
    # (self.__httptunnel cleared); otherwise pass bytes through as-is.
    if not self.__httptunnel:
        content = self.__rewriteproxy(content)
    return super(socksocket, self).sendall(content, *args)
def __rewriteproxy(self, header):
    """ rewrite HTTP request headers to support non-tunneling proxies
    (i.e. those which do not support the CONNECT method).
    This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
    """
    host, endpt = None, None
    hdrs = header.split("\r\n")
    # Locate the Host: header and the GET/POST request line.
    for hdr in hdrs:
        if hdr.lower().startswith("host:"):
            host = hdr
        elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
            endpt = hdr
    if host and endpt:
        hdrs.remove(host)
        hdrs.remove(endpt)
        # "Host: example.com" -> "example.com"; request line -> 3 fields.
        host = host.split(" ")[1]
        endpt = endpt.split(" ")
        if (self.__proxy[4] != None and self.__proxy[5] != None):
            # Credentials configured -> add Proxy-Authorization.
            hdrs.insert(0, self.__getauthheader())
        hdrs.insert(0, "Host: %s" % host)
        # Plain HTTP proxies require an absolute URI in the request line.
        hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
    return "\r\n".join(hdrs)
def __getauthheader(self):
    # Build a Basic Proxy-Authorization header from the stored username
    # (proxy tuple index 4) and password (index 5).
    # NOTE(review): base64.b64encode() on a str only works on Python 2;
    # Python 3 would need bytes -- confirm the target interpreter.
    auth = self.__proxy[4] + ":" + self.__proxy[5]
    return "Proxy-Authorization: Basic " + base64.b64encode(auth)
def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
    Sets the proxy to be used.
    proxytype - The type of the proxy to be used. Three types
            are supported: PROXY_TYPE_SOCKS4 (including socks4a),
            PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
    addr - The address of the server (IP or DNS).
    port - The port of the server. Defaults to 1080 for SOCKS
           servers and 8080 for HTTP proxy servers.
    rdns - Should DNS queries be preformed on the remote side
           (rather than the local side). The default is True.
           Note: This has no effect with SOCKS4 servers.
    username - Username to authenticate with to the server.
           The default is no authentication.
    password - Password to authenticate with to the server.
           Only relevant when username is also provided.
    """
    # Stored positionally; consumed by the negotiation methods.
    self.__proxy = (proxytype, addr, port, rdns, username, password)
    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.

        destaddr - destination host (IP string or DNS name).
        destport - destination port number.

        Implements the client side of the SOCKS5 handshake (RFC 1928)
        with optional username/password sub-negotiation (RFC 1929):
        method selection, authentication, then the CONNECT request.
        Closes the socket and raises GeneralProxyError, Socks5AuthError
        or Socks5Error on any failure.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        # Byte 0 must echo the SOCKS version (0x05).
        if chosenauth[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1:2] == chr(0x00).encode():
            # No authentication is required
            pass
        elif chosenauth[1:2] == chr(0x02).encode():
            # Okay, we need to perform a basic username/password
            # authentication.
            self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0:1] != chr(0x01).encode():
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1:2] != chr(0x00).encode():
                # Authentication failed
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            # NOTE(review): on Python 3, chosenauth[1] is an int, so this
            # bytes comparison is always False and the generic error below
            # is raised instead of the 0xFF-specific one. The rest of the
            # module is Python-2-flavored (basestring in connect()), so
            # this matches the intended runtime — confirm before porting.
            if chosenauth[1] == chr(0xFF).encode():
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = struct.pack('BBB', 0x05, 0x01, 0x00)
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + chr(0x01).encode() + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3]:
                # Resolve remotely
                ipaddr = None
                req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + chr(0x01).encode() + ipaddr
        req = req + struct.pack(">H", destport)
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1:2] != chr(0x00).encode():
            # Connection failed
            self.close()
            if ord(resp[1:2])<=8:
                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port
        elif resp[3:4] == chr(0x01).encode():
            # ATYP 0x01: IPv4 address, fixed 4 bytes.
            boundaddr = self.__recvall(4)
        elif resp[3:4] == chr(0x03).encode():
            # ATYP 0x03: domain name, length-prefixed by one extra byte.
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4:5]))
        else:
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        # ipaddr is None only when the proxy resolved the name remotely;
        # in that case we only know the DNS name of the peer.
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        # Delegate to the real socket implementation: the underlying TCP
        # connection goes to the proxy server, so its peer IS the proxy.
        return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if rmtrslv != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.

        Sends a CONNECT request for destaddr:destport and succeeds only
        if the proxy answers with HTTP status 200; otherwise the socket
        is closed and GeneralProxyError or HTTPError is raised.
        """
        # If we need to resolve locally, we do this now
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
        headers += ["Host: ", destaddr, "\r\n"]
        # Attach basic proxy authentication when credentials were given.
        if (self.__proxy[4] != None and self.__proxy[5] != None):
            headers += [self.__getauthheader(), "\r\n"]
        headers.append("\r\n")
        self.sendall("".join(headers).encode())
        # We read the response until we get the string "\r\n\r\n",
        # one byte at a time so no data beyond the header is consumed.
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        # HTTP proxies do not report a bound address, so use a placeholder.
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (not isinstance(destpair[0], basestring)) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1],portnum))
if destpair[1] == 443:
self.__negotiatehttp(destpair[0],destpair[1])
else:
self.__httptunnel = False
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
|
leansoft/edx-platform | refs/heads/master | common/djangoapps/course_modes/migrations/0001_initial.py | 114 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the course_modes_coursemode table."""

    def forwards(self, orm):
        # Adding model 'CourseMode'
        db.create_table('course_modes_coursemode', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('mode_slug', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('mode_display_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('min_price', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('suggested_prices', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(default='', max_length=255, blank=True)),
        ))
        db.send_create_signal('course_modes', ['CourseMode'])

    def backwards(self, orm):
        # Deleting model 'CourseMode'
        db.delete_table('course_modes_coursemode')

    # Frozen ORM snapshot used by South while replaying this migration;
    # generated code — keep in sync with the model, do not hand-edit.
    models = {
        'course_modes.coursemode': {
            'Meta': {'object_name': 'CourseMode'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['course_modes']
|
etherkit/OpenBeacon2 | refs/heads/master | client/linux-arm/venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-sqlalchemy.py | 4 | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import re
from PyInstaller.utils.hooks import (
exec_statement, is_module_satisfies, logger)
from PyInstaller.compat import open_file, text_read_mode
from PyInstaller.lib.modulegraph.modulegraph import SourceModule
from PyInstaller.lib.modulegraph.util import guess_encoding
# Parse the printed dialect list safely (see literal_eval use below).
import ast

# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.
excludedimports = ['sqlalchemy.testing']
# include most common database bindings
# some database bindings are detected and include some
# are not. We should explicitly include database backends.
hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2']
# In SQLAlchemy >= 0.6, the "sqlalchemy.dialects" package provides dialects.
if is_module_satisfies('sqlalchemy >= 0.6'):
    dialects = exec_statement("import sqlalchemy.dialects;print(sqlalchemy.dialects.__all__)")
    # The child interpreter prints a Python list literal; parse it with
    # ast.literal_eval instead of eval() so arbitrary code smuggled into
    # the subprocess output can never be executed.
    dialects = ast.literal_eval(dialects.strip())
    for n in dialects:
        hiddenimports.append("sqlalchemy.dialects." + n)
# In SQLAlchemy <= 0.5, the "sqlalchemy.databases" package provides dialects.
else:
    databases = exec_statement("import sqlalchemy.databases; print(sqlalchemy.databases.__all__)")
    databases = ast.literal_eval(databases.strip())
    for n in databases:
        hiddenimports.append("sqlalchemy.databases." + n)
def hook(hook_api):
    """
    SQLAlchemy 0.9 introduced the decorator 'util.dependencies', which
    performs imports of its own, e.g.:

        @util.dependencies("sqlalchemy.sql.schema")

    Scan every bundled SQLAlchemy source module for such decorators and
    register the referenced modules as hidden imports.
    """
    if not is_module_satisfies('sqlalchemy >= 0.9'):
        return
    # this parser is very simplistic but seems to catch all cases as of V1.1
    depend_regex = re.compile(r'@util.dependencies\([\'"](.*?)[\'"]\)')
    discovered = set()
    already_bundled = set()
    for node in hook_api.module_graph.flatten(start=hook_api.module):
        if not (isinstance(node, SourceModule)
                and node.identifier.startswith('sqlalchemy.')):
            continue
        already_bundled.add(node.identifier)
        # Determine the encoding of the source file.
        with open_file(node.filename, 'rb') as f:
            encoding = guess_encoding(f)
        # Use that to open the file.
        with open_file(node.filename, text_read_mode,
                       encoding=encoding) as f:
            discovered.update(depend_regex.findall(f.read()))
    # Modules already in the graph need no hidden-import entry.
    discovered -= already_bundled
    if discovered:
        logger.info(" Found %d sqlalchemy hidden imports", len(discovered))
        hook_api.add_imports(*list(discovered))
|
great-expectations/great_expectations | refs/heads/develop | great_expectations/render/page_renderer_util.py | 1 | import warnings
from great_expectations.render.renderer import ValidationResultsPageRenderer
from great_expectations.render.view import DefaultMarkdownPageView
from great_expectations.validation_operators.types.validation_operator_result import (
ValidationOperatorResult,
)
def render_multiple_validation_result_pages_markdown(
    validation_operator_result: ValidationOperatorResult,
    run_info_at_end: bool = True,
) -> str:
    """
    Render every validation result in a ValidationOperatorResult to one
    markdown string.

    Deprecated: call
    ValidationResultsPageRenderer.render_validation_operator_result()
    directly instead.

    Args:
        validation_operator_result: (ValidationOperatorResult) Result of validation operator run
        run_info_at_end: move run info below expectation results

    Returns:
        string containing formatted markdown validation results
    """
    warnings.warn(
        "This 'render_multiple_validation_result_pages_markdown' function will be deprecated "
        "Please use ValidationResultsPageRenderer.render_validation_operator_result() instead."
        "E.g. to replicate the functionality of rendering a ValidationOperatorResult to markdown:"
        "validation_results_page_renderer = ValidationResultsPageRenderer("
        "    run_info_at_end=run_info_at_end"
        ")"
        "rendered_document_content_list = validation_results_page_renderer.render_validation_operator_result("
        "    validation_operator_result=validation_operator_result"
        ")"
        'return " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))'
        "Please update code accordingly.",
        DeprecationWarning,
    )
    renderer = ValidationResultsPageRenderer(run_info_at_end=run_info_at_end)
    rendered_documents = renderer.render_validation_operator_result(
        validation_operator_result=validation_operator_result
    )
    return " ".join(DefaultMarkdownPageView().render(rendered_documents))
|
sruizr/pysync_redmine | refs/heads/master | pysync_redmine/repositories/ganttproject.py | 1 | import datetime
import xml.etree.ElementTree as ET
from pysync_redmine.domain import (Repository,
Project,
Member,
Task,
Phase,
Calendar,
RelationSet,
StringTree)
import pdb
class GanttRepo(Repository):
    """Repository backed by a GanttProject XML file (.gan).

    Reads resources, top-level container tasks and leaf tasks out of the
    XML tree and materializes them as Member, Phase and Task objects of
    the pysync domain model.
    """

    def __init__(self):
        self.class_key = 'GanttRepo'
        Repository.__init__(self)

    def open_source(self, **setup_pars):
        """Parse the file named by setup_pars['filename'] and create the
        Project shell from the root element's 'name' attribute."""
        self.setup_pars = setup_pars
        self.source = ET.parse(setup_pars['filename']).getroot()
        project_name = self.source.attrib['name']
        self.project = Project(project_name, self)

    def load_members(self):
        """Create one Member per <resource>, translating its 'function'
        role id into the role name via the <roles>/<role> table."""
        project = self.project
        members = {}
        resources = self.source.findall('./resources/resource')
        functions = self.source.findall('./roles/role')
        for resource in resources:
            role = resource.attrib['function']
            # 'function' holds a role id; fall back to the raw id when
            # no matching <role> entry exists.
            for function in functions:
                if function.attrib['id'] == role:
                    role = function.attrib['name']
                    break
            member = Member(project, resource.attrib['name'], role)
            member._id = int(resource.attrib['id'])
            members[member._id] = member
            member._snap()
        project.members = members

    def load_calendar(self):
        """Attach a default Calendar (GanttProject calendars are not
        imported)."""
        self.project.calendar = Calendar()

    def load_phases(self):
        """Create Phase objects from tasks that contain subtasks.

        A phase task is named '<key>. <description>'; its due date is
        derived from its start date plus duration via the calendar.
        """
        project = self.project
        phases = {}
        # Only tasks that themselves contain a <task> child are phases.
        resources = self.source.findall('./tasks/task[task]')
        for resource in resources:
            phase = Phase(project)
            phase_id = int(resource.attrib['id'])
            phase._id = phase_id
            name = resource.attrib['name']
            key, description = name.split('. ')
            key = key.strip()
            description = description.strip()
            phase.key = key
            phase.description = description
            start_date = datetime.datetime.strptime(
                    resource.attrib['start'],
                    '%Y-%m-%d').date()
            phase.due_date = project.calendar.get_end_date(
                    start_date,
                    int(resource.attrib['duration'])
                    )
            phases[phase_id] = phase
            phase._snap()
        project.phases = phases

    def load_tasks(self):
        """Load leaf tasks and wire up their relationships.

        Runs in several passes over the XML: create the Task objects,
        then resolve parent/dependency/custom-property links, then
        allocations (assignee/collaborators), then phase membership.
        Must run after load_members() and load_phases().
        """
        project = self.project
        tasks = {}
        resources = self.source.findall('./tasks//task')
        # Pass 1: create every task that is not a phase container.
        for resource in resources:
            if int(resource.attrib['id']) not in project.phases:
                task = Task(project)
                task._id = int(resource.attrib['id'])
                task.description = resource.attrib['name']
                task.start_date = datetime.datetime.strptime(
                        resource.attrib['start'],
                        '%Y-%m-%d').date()
                task.duration = int(resource.attrib['duration'])
                task.complete = int(resource.attrib['complete'])
                tasks[task._id] = task
        project.tasks = tasks
        # Resolve the custom-property ids used for task inputs/outputs.
        input_id = self.source.findall('./tasks/taskproperties/taskproperty'
                                       '[@name="inputs"]')
        if input_id:
            input_id = input_id[0].attrib['id']
        output_id = self.source.findall('./tasks/taskproperties/taskproperty'
                                        '[@name="outputs"]')
        if output_id:
            output_id = output_id[0].attrib['id']
        # Pass 2: now that all tasks exist, resolve cross-references.
        for resource in resources:
            if int(resource.attrib['id']) not in project.phases:
                task = project.tasks[int(resource.attrib['id'])]
                for child in resource:
                    if child.tag == 'task':
                        # Nested <task> means this task is its parent.
                        subtask = project.tasks[int(child.attrib['id'])]
                        subtask.parent = task
                    if child.tag == 'depend':
                        next_task = project.tasks[int(child.attrib['id'])]
                        task.relations.add_next(next_task,
                                                int(child.attrib['difference'])
                                                )
                    if child.tag == 'customproperty':
                        if child.attrib['taskproperty-id'] == input_id:
                            task.inputs = self._get_tokens(
                                    child.attrib['value']
                                    )
                        if child.attrib['taskproperty-id'] == output_id:
                            task.outputs = self._get_tokens(
                                    child.attrib['value']
                                    )
        # Pass 3: allocations map members onto tasks; the 'responsible'
        # member becomes the assignee, the rest are collaborators.
        resources = self.source.findall('./allocations/allocation')
        for resource in resources:
            task_id = int(resource.attrib['task-id'])
            if task_id in project.tasks:
                task = project.tasks[task_id]
                member = project.members[int(
                        resource.attrib['resource-id']
                        )]
                if resource.attrib['responsible'] == 'true':
                    task.assigned_to = member
                else:
                    task.colaborators.append(member)
        # Pass 4: every task nested under a phase container belongs to
        # that phase.
        for phase in project.phases.values():
            resources = self.source.findall(
                    "./tasks/task[@id='{}']//task".format(
                            phase._id
                            )
                    )
            for resource in resources:
                task = project.tasks[int(resource.attrib['id'])]
                task.phase = phase
                phase.tasks.append(task)
        for task in project.tasks.values():
            task._snap()

    def _get_tokens(self, token_string):
        """Split a comma-separated custom-property value into tokens and
        register each '//'-separated path in the project token tree.

        Returns the list of nodes created/returned by tokens.add_node().
        """
        tokens = token_string.split(',')
        result = []
        for token in tokens:
            token = token.strip()
            token = token.split('//')
            token = [e.strip() for e in token]
            token = self.project.tokens.add_node(token)
            result.append(token)
        return result
|
goliate/sarakha63-persomov | refs/heads/master | libs/pyutil/scripts/passphrase.py | 92 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse, math, random
from pyutil.mathutil import div_ceil
from pkg_resources import resource_stream
def recursive_subset_sum(entropy_needed, wordlists):
    """Greedily pick a minimalish sequence of wordlists whose per-word
    entropies sum to at least entropy_needed.

    wordlists is sorted smallest-to-largest; each entry is a
    (bits_of_entropy, words) pair. Returns a list of such pairs, one per
    word to draw.
    """
    # The biggest wordlist yields the most entropy per word, which bounds
    # the minimum number of words required.
    entropy_of_biggest_wordlist = wordlists[-1][0]
    assert isinstance(entropy_of_biggest_wordlist, float), wordlists[-1]
    needed_words = div_ceil(entropy_needed, entropy_of_biggest_wordlist)
    # Spread the requirement evenly over that many words.
    needed_entropy_per_word = entropy_needed / needed_words
    # Smallest wordlist that still provides that much entropy per word.
    for (wlentropy, wl) in wordlists:
        if wlentropy >= needed_entropy_per_word:
            break
    assert wlentropy >= needed_entropy_per_word, (wlentropy, needed_entropy_per_word)
    chosen = [(wlentropy, wl)]
    # One word may not be enough; recurse for the remaining entropy.
    if wlentropy < entropy_needed:
        chosen.extend(recursive_subset_sum(entropy_needed - wlentropy, wordlists))
    return chosen
def gen_passphrase(entropy, allwords):
    """Pick random words worth at least `entropy` bits in total.

    Returns a (passphrase, bits) tuple where passphrase joins the chosen
    words with '.' and bits is the entropy actually achieved.
    """
    # Build nested tiers: words of length <= 2, <= 3, ... until the whole
    # dictionary is covered; tag each tier with its entropy in bits.
    tiers = []
    max_len = 2
    while True:
        tier = [w for w in allwords if len(w) <= max_len]
        tiers.append((math.log(len(tier), 2), tier))
        if len(tier) == len(allwords):
            break
        max_len += 1
    rng = random.SystemRandom()
    words_chosen = []
    achieved = 0.0
    for (bits, tier) in recursive_subset_sum(entropy, tiers):
        words_chosen.append(rng.choice(tier))
        achieved += bits
    return (u".".join(words_chosen), achieved)
def main():
    """Command-line entry point: print one random passphrase worth at
    least the requested number of entropy bits."""
    parser = argparse.ArgumentParser(prog="chbs", description="Create a random passphrase by picking a few random words.")
    # NOTE(review): open mode 'rU' was removed in Python 3.11; switch to
    # 'r' if this script must run there.
    parser.add_argument('-d', '--dictionary', help="what file to read a list of words from (or omit this option to use chbs's bundled dictionary)", type=argparse.FileType('rU'), metavar="DICT")
    parser.add_argument('bits', help="how many bits of entropy minimum", type=float, metavar="BITS")
    args = parser.parse_args()
    dicti = args.dictionary
    if not dicti:
        # Fall back to the wordlist bundled with pyutil.
        dicti = resource_stream('pyutil', 'data/wordlist.txt')
    # Normalize: one lower-cased unicode word per line, duplicates dropped.
    allwords = set([x.decode('utf-8').strip().lower() for x in dicti.readlines()])
    passphrase, bits = gen_passphrase(args.bits, allwords)
    # Parenthesized so this is valid (and identical) on Python 2 and 3;
    # the original used a bare Python 2 print statement, which is a
    # syntax error on Python 3.
    print(u"Your new password is: '%s'. It is worth about %s bits." % (passphrase, bits))
|
GaelMagnan/PyGitHook | refs/heads/master | src/Tasks/CheckMergeErrorTask.py | 1 | """
Hook task to check if a merge error has been made.
Uses regex to check for existance of standard merge separator.
Inherite of this class and modify the regex attribute for custom merge separator.
AUTHOR:
Gael Magnan de bornier
"""
import re
from src.Tasks.HookTask import HookNewOrModifiedFileTask
class CheckMergeErrorTask(HookNewOrModifiedFileTask):
    """Hook task that rejects files containing unresolved merge-conflict
    markers (lines with '>>>>>>>' or '<<<<<<<').

    Subclass and override `regex` to match custom merge separators.
    """

    regex = "^.*(>>>>>>>|<<<<<<<).*$"

    def execute(self, filename, file_value, **kwargs):
        """Return True when no merge markers are found in file_value."""
        print("> Checking bad merge for '%s' ... " % (filename))
        clean = True
        # Line numbers are reported 1-based.
        for lineno, text in enumerate(file_value, 1):
            match = re.search(self.regex, text)
            if match is not None:
                print(">> Merge Errors(%s) Line:%d, please check the file\n" %
                      (str(match.group(0)), lineno))
                clean = False
        if clean:
            print(">> OK")
        return clean
|
moble/sympy | refs/heads/master | sympy/physics/quantum/identitysearch.py | 58 | from __future__ import print_function, division
from collections import deque
from random import randint
from sympy.core.compatibility import range
from sympy.external import import_module
from sympy import Mul, Basic, Number, Pow, Integer
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.dagger import Dagger
__all__ = [
# Public interfaces
'generate_gate_rules',
'generate_equivalent_ids',
'GateIdentity',
'bfs_identity_search',
'random_identity_search',
# "Private" functions
'is_scalar_sparse_matrix',
'is_scalar_nonsparse_matrix',
'is_degenerate',
'is_reducible',
]
np = import_module('numpy')
scipy = import_module('scipy', __import__kwargs={'fromlist': ['sparse']})
def is_scalar_sparse_matrix(circuit, nqubits, identity_only, eps=1e-11):
    """Checks if a given scipy.sparse matrix is a scalar matrix.

    A scalar matrix is such that B = bI, where B is the scalar
    matrix, b is some scalar multiple, and I is the identity
    matrix.  A scalar matrix would have only the element b along
    it's main diagonal and zeroes elsewhere.

    Parameters
    ==========

    circuit : Gate tuple
        Sequence of quantum gates representing a quantum circuit
    nqubits : int
        Number of qubits in the circuit
    identity_only : bool
        Check for only identity matrices
    eps : number
        The tolerance value for zeroing out elements in the matrix.
        Values in the range [-eps, +eps] will be changed to a zero.
    """
    # NOTE(review): this guard silently does nothing when numpy/scipy are
    # missing; the module-level alias below only selects this function
    # when both are available, so the branch is effectively dead. Kept
    # as-is to preserve historical behavior.
    if not np or not scipy:
        pass
    matrix = represent(Mul(*circuit), nqubits=nqubits,
                       format='scipy.sparse')
    # In some cases, represent returns a 1D scalar value in place
    # of a multi-dimensional scalar matrix
    if (isinstance(matrix, int)):
        return matrix == 1 if identity_only else True
    # If represent returns a matrix, check if the matrix is diagonal
    # and if every item along the diagonal is the same
    else:
        # Due to floating pointing operations, must zero out
        # elements that are "very" small in the dense matrix
        # See parameter for default value.
        # Get the ndarray version of the dense matrix
        dense_matrix = matrix.todense().getA()
        # Since complex values can't be compared, must split
        # the matrix into real and imaginary components
        # Find the real values in between -eps and eps
        bool_real = np.logical_and(dense_matrix.real > -eps,
                                   dense_matrix.real < eps)
        # Find the imaginary values between -eps and eps
        bool_imag = np.logical_and(dense_matrix.imag > -eps,
                                   dense_matrix.imag < eps)
        # Replaces values between -eps and eps with 0
        corrected_real = np.where(bool_real, 0.0, dense_matrix.real)
        corrected_imag = np.where(bool_imag, 0.0, dense_matrix.imag)
        # Convert the matrix with real values into imaginary values.
        # BUGFIX: the original used np.complex(1j); np.complex was merely
        # an alias for the builtin complex and was removed in NumPy 1.24,
        # so use the literal 1j directly (numerically identical).
        corrected_imag = corrected_imag * 1j
        # Recombine the real and imaginary components
        corrected_dense = corrected_real + corrected_imag
        # Check if it's diagonal
        row_indices = corrected_dense.nonzero()[0]
        col_indices = corrected_dense.nonzero()[1]
        # Check if the rows indices and columns indices are the same
        # If they match, then matrix only contains elements along diagonal
        bool_indices = row_indices == col_indices
        is_diagonal = bool_indices.all()
        first_element = corrected_dense[0][0]
        # If the first element is a zero, then can't rescale matrix
        # and definitely not diagonal
        if (first_element == 0.0 + 0.0j):
            return False
        # The dimensions of the dense matrix should still
        # be 2^nqubits if there are elements all along the
        # the main diagonal
        trace_of_corrected = (corrected_dense/first_element).trace()
        expected_trace = pow(2, nqubits)
        has_correct_trace = trace_of_corrected == expected_trace
        # If only looking for identity matrices
        # first element must be a 1
        real_is_one = abs(first_element.real - 1.0) < eps
        imag_is_zero = abs(first_element.imag) < eps
        is_one = real_is_one and imag_is_zero
        is_identity = is_one if identity_only else True
        return bool(is_diagonal and has_correct_trace and is_identity)
def is_scalar_nonsparse_matrix(circuit, nqubits, identity_only):
    """Checks if a given circuit, in matrix form, is equivalent to
    a scalar value.

    Parameters
    ==========

    circuit : Gate tuple
        Sequence of quantum gates representing a quantum circuit
    nqubits : int
        Number of qubits in the circuit
    identity_only : bool
        Check for only identity matrices

    Note: Used in situations when is_scalar_sparse_matrix has bugs
    """
    matrix = represent(Mul(*circuit), nqubits=nqubits)
    # represent() sometimes collapses to a plain scalar instead of a
    # 2^n x 2^n matrix.
    if isinstance(matrix, Number):
        return matrix == 1 if identity_only else True
    # Otherwise the circuit is scalar iff the matrix is diagonal and its
    # (rescaled) trace equals the dimension 2^nqubits.
    matrix_trace = matrix.trace()
    if identity_only:
        adjusted_matrix_trace = matrix_trace
        is_identity = matrix[0] == 1.0
    else:
        # Rescale by the first element when any scalar multiple counts.
        adjusted_matrix_trace = matrix_trace / matrix[0]
        is_identity = True
    has_correct_trace = adjusted_matrix_trace == pow(2, nqubits)
    return bool(matrix.is_diagonal() and has_correct_trace and is_identity)
# Prefer the scipy.sparse implementation when numpy and scipy are both
# importable; otherwise fall back to the pure-sympy matrix version.
is_scalar_matrix = (is_scalar_sparse_matrix if (np and scipy)
                    else is_scalar_nonsparse_matrix)
def _get_min_qubits(a_gate):
    # A Pow wraps its base gate; unwrap it before reading min_qubits.
    gate = a_gate.base if isinstance(a_gate, Pow) else a_gate
    return gate.min_qubits
def ll_op(left, right):
    """Perform a LL operation.

    A LL operation multiplies both left and right circuits
    with the dagger of the left circuit's leftmost gate, and
    the dagger is multiplied on the left side of both circuits.

    If a LL is possible, it returns the new gate rule as a
    2-tuple (LHS, RHS), where LHS is the left circuit and
    and RHS is the right circuit of the new rule.
    If a LL is not possible, None is returned.

    Parameters
    ==========

    left : Gate tuple
        The left circuit of a gate rule expression.
    right : Gate tuple
        The right circuit of a gate rule expression.

    Examples
    ========

    Generate a new gate rule using a LL operation:

    >>> from sympy.physics.quantum.identitysearch import ll_op
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> ll_op((x, y, z), ())
    ((Y(0), Z(0)), (X(0),))

    >>> ll_op((y, z), (x,))
    ((Z(0),), (Y(0), X(0)))
    """
    # Nothing to move when the left circuit is empty.
    if len(left) == 0:
        return None
    gate = left[0]
    # Only valid when Dagger(gate)*gate reduces to the identity.
    gate_is_unitary = is_scalar_matrix(
        (Dagger(gate), gate), _get_min_qubits(gate), True)
    if not gate_is_unitary:
        return None
    # Drop the gate from the left and prepend its dagger on the right.
    return (left[1:], (Dagger(gate),) + right)
def lr_op(left, right):
    """Perform a LR operation.

    A LR operation multiplies both left and right circuits
    with the dagger of the left circuit's rightmost gate, and
    the dagger is multiplied on the right side of both circuits.

    If a LR is possible, it returns the new gate rule as a
    2-tuple (LHS, RHS), where LHS is the left circuit and
    and RHS is the right circuit of the new rule.
    If a LR is not possible, None is returned.

    Parameters
    ==========

    left : Gate tuple
        The left circuit of a gate rule expression.
    right : Gate tuple
        The right circuit of a gate rule expression.

    Examples
    ========

    Generate a new gate rule using a LR operation:

    >>> from sympy.physics.quantum.identitysearch import lr_op
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> lr_op((x, y, z), ())
    ((X(0), Y(0)), (Z(0),))

    >>> lr_op((x, y), (z,))
    ((X(0),), (Z(0), Y(0)))
    """
    # Nothing to move when the left circuit is empty.
    if len(left) == 0:
        return None
    gate = left[-1]
    # Only valid when Dagger(gate)*gate reduces to the identity.
    gate_is_unitary = is_scalar_matrix(
        (Dagger(gate), gate), _get_min_qubits(gate), True)
    if not gate_is_unitary:
        return None
    # Drop the gate from the left and append its dagger on the right.
    return (left[:-1], right + (Dagger(gate),))
def rl_op(left, right):
    """Perform a RL operation.

    A RL operation multiplies both left and right circuits
    with the dagger of the right circuit's leftmost gate, and
    the dagger is multiplied on the left side of both circuits.

    If a RL is possible, it returns the new gate rule as a
    2-tuple (LHS, RHS), where LHS is the left circuit and
    and RHS is the right circuit of the new rule.
    If a RL is not possible, None is returned.

    Parameters
    ==========

    left : Gate tuple
        The left circuit of a gate rule expression.
    right : Gate tuple
        The right circuit of a gate rule expression.

    Examples
    ========

    Generate a new gate rule using a RL operation:

    >>> from sympy.physics.quantum.identitysearch import rl_op
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> rl_op((x,), (y, z))
    ((Y(0), X(0)), (Z(0),))

    >>> rl_op((x, y), (z,))
    ((Z(0), X(0), Y(0)), ())
    """
    # Nothing to move when the right circuit is empty.
    if len(right) == 0:
        return None
    gate = right[0]
    # Only valid when Dagger(gate)*gate reduces to the identity.
    gate_is_unitary = is_scalar_matrix(
        (Dagger(gate), gate), _get_min_qubits(gate), True)
    if not gate_is_unitary:
        return None
    # Drop the gate from the right and prepend its dagger on the left.
    return ((Dagger(gate),) + left, right[1:])
def rr_op(left, right):
    """Perform a RR operation.
    A RR operation takes the rightmost gate of the right circuit and
    right-multiplies both sides of the rule by that gate's dagger.
    If a RR is possible, it returns the new gate rule as a
    2-tuple (LHS, RHS), where LHS is the left circuit and
    RHS is the right circuit of the new rule.
    If a RR is not possible, None is returned.
    Parameters
    ==========
    left : Gate tuple
        The left circuit of a gate rule expression.
    right : Gate tuple
        The right circuit of a gate rule expression.
    Examples
    ========
    Generate a new gate rule using a RR operation:
    >>> from sympy.physics.quantum.identitysearch import rr_op
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> rr_op((x, y), (z,))
    ((X(0), Y(0), Z(0)), ())
    >>> rr_op((x,), (y, z))
    ((X(0), Z(0)), (Y(0),))
    """
    if not right:
        return None
    # Gate that will be moved across the equal sign.
    pivot = right[-1]
    inverse = Dagger(pivot)
    # The move is only legal when dagger(pivot)*pivot reduces to a scalar
    # matrix, i.e. the pivot gate is unitary.
    if not is_scalar_matrix((inverse, pivot), _get_min_qubits(pivot), True):
        return None
    # Drop the pivot from the right circuit; append its dagger on the left.
    return (left + (inverse,), right[:-1])
def generate_gate_rules(gate_seq, return_as_muls=False):
    """Returns a set of gate rules. Each gate rules is represented
    as a 2-tuple of tuples or Muls. An empty tuple represents an arbitrary
    scalar value.
    This function uses the four operations (LL, LR, RL, RR)
    to generate the gate rules.
    A gate rule is an expression such as ABC = D or AB = CD, where
    A, B, C, and D are gates. Each value on either side of the
    equal sign represents a circuit. The four operations allow
    one to find a set of equivalent circuits from a gate identity.
    The letters denoting the operation tell the user what
    activities to perform on each expression. The first letter
    indicates which side of the equal sign to focus on. The
    second letter indicates which gate to focus on given the
    side. Once this information is determined, the inverse
    of the gate is multiplied on both circuits to create a new
    gate rule.
    For example, given the identity, ABCD = 1, a LL operation
    means look at the left value and multiply both left sides by the
    inverse of the leftmost gate A. If A is Hermitian, the inverse
    of A is still A. The resulting new rule is BCD = A.
    The following is a summary of the four operations. Assume
    that in the examples, all gates are Hermitian.
    LL : left circuit, left multiply
        ABCD = E -> AABCD = AE -> BCD = AE
    LR : left circuit, right multiply
        ABCD = E -> ABCDD = ED -> ABC = ED
    RL : right circuit, left multiply
        ABC = ED -> EABC = EED -> EABC = D
    RR : right circuit, right multiply
        AB = CD -> ABD = CDD -> ABD = C
    The number of gate rules generated is n*(n+1), where n
    is the number of gates in the sequence (unproven).
    Parameters
    ==========
    gate_seq : Gate tuple, Mul, or Number
        A variable length tuple or Mul of Gates whose product is equal to
        a scalar matrix
    return_as_muls : bool
        True to return a set of Muls; False to return a set of tuples
    Examples
    ========
    Find the gate rules of the current circuit using tuples:
    >>> from sympy.physics.quantum.identitysearch import generate_gate_rules
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> generate_gate_rules((x, x))
    set([((X(0),), (X(0),)), ((X(0), X(0)), ())])
    >>> generate_gate_rules((x, y, z))
    set([((), (X(0), Z(0), Y(0))), ((), (Y(0), X(0), Z(0))),
    ((), (Z(0), Y(0), X(0))), ((X(0),), (Z(0), Y(0))),
    ((Y(0),), (X(0), Z(0))), ((Z(0),), (Y(0), X(0))),
    ((X(0), Y(0)), (Z(0),)), ((Y(0), Z(0)), (X(0),)),
    ((Z(0), X(0)), (Y(0),)), ((X(0), Y(0), Z(0)), ()),
    ((Y(0), Z(0), X(0)), ()), ((Z(0), X(0), Y(0)), ())])
    Find the gate rules of the current circuit using Muls:
    >>> generate_gate_rules(x*x, return_as_muls=True)
    set([(1, 1)])
    >>> generate_gate_rules(x*y*z, return_as_muls=True)
    set([(1, X(0)*Z(0)*Y(0)), (1, Y(0)*X(0)*Z(0)),
    (1, Z(0)*Y(0)*X(0)), (X(0)*Y(0), Z(0)),
    (Y(0)*Z(0), X(0)), (Z(0)*X(0), Y(0)),
    (X(0)*Y(0)*Z(0), 1), (Y(0)*Z(0)*X(0), 1),
    (Z(0)*X(0)*Y(0), 1), (X(0), Z(0)*Y(0)),
    (Y(0), X(0)*Z(0)), (Z(0), Y(0)*X(0))])
    """
    # A plain scalar has only the trivial rule 1 = 1 (empty = empty).
    if isinstance(gate_seq, Number):
        if return_as_muls:
            return set([(Integer(1), Integer(1))])
        else:
            return set([((), ())])
    elif isinstance(gate_seq, Mul):
        # Normalize a Mul of gates into the tuple form used internally.
        gate_seq = gate_seq.args
    # Each item in queue is a 3-tuple:
    #     i) first item is the left side of an equality
    #    ii) second item is the right side of an equality
    #   iii) third item is the number of operations performed
    # The argument, gate_seq, will start on the left side, and
    # the right side will be empty, implying the presence of an
    # identity.
    queue = deque()
    # A set of gate rules
    rules = set()
    # Maximum number of operations to perform
    max_ops = len(gate_seq)
    def process_new_rule(new_rule, ops):
        # Closure over 'rules', 'queue' and 'max_ops': record the rule
        # (deduplicating mirrored rules) and schedule further expansion.
        if new_rule is not None:
            new_left, new_right = new_rule
            # A rule and its mirror image (RHS = LHS) are equivalent;
            # only keep one of the two.
            if new_rule not in rules and (new_right, new_left) not in rules:
                rules.add(new_rule)
            # If haven't reached the max limit on operations
            if ops + 1 < max_ops:
                queue.append(new_rule + (ops + 1,))
    queue.append((gate_seq, (), 0))
    rules.add((gate_seq, ()))
    # Breadth-first exploration of the rule space via the four operations.
    while len(queue) > 0:
        left, right, ops = queue.popleft()
        # Do a LL
        new_rule = ll_op(left, right)
        process_new_rule(new_rule, ops)
        # Do a LR
        new_rule = lr_op(left, right)
        process_new_rule(new_rule, ops)
        # Do a RL
        new_rule = rl_op(left, right)
        process_new_rule(new_rule, ops)
        # Do a RR
        new_rule = rr_op(left, right)
        process_new_rule(new_rule, ops)
    if return_as_muls:
        # Convert each rule as tuples into a rule as muls
        mul_rules = set()
        for rule in rules:
            left, right = rule
            mul_rules.add((Mul(*left), Mul(*right)))
        rules = mul_rules
    return rules
def generate_equivalent_ids(gate_seq, return_as_muls=False):
    """Returns a set of equivalent gate identities.
    A gate identity is a quantum circuit such that the product
    of the gates in the circuit is equal to a scalar value.
    For example, XYZ = i, where X, Y, Z are the Pauli gates and
    i is the imaginary value, is considered a gate identity.
    This function uses the four operations (LL, LR, RL, RR)
    to generate the gate rules and, subsequently, to locate equivalent
    gate identities.
    Note that all equivalent identities are reachable in n operations
    from the starting gate identity, where n is the number of gates
    in the sequence.
    The max number of gate identities is 2n, where n is the number
    of gates in the sequence (unproven).
    Parameters
    ==========
    gate_seq : Gate tuple, Mul, or Number
        A variable length tuple or Mul of Gates whose product is equal to
        a scalar matrix.
    return_as_muls: bool
        True to return as Muls; False to return as tuples
    Examples
    ========
    Find equivalent gate identities from the current circuit with tuples:
    >>> from sympy.physics.quantum.identitysearch import generate_equivalent_ids
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> generate_equivalent_ids((x, x))
    set([(X(0), X(0))])
    >>> generate_equivalent_ids((x, y, z))
    set([(X(0), Y(0), Z(0)), (X(0), Z(0), Y(0)), (Y(0), X(0), Z(0)),
         (Y(0), Z(0), X(0)), (Z(0), X(0), Y(0)), (Z(0), Y(0), X(0))])
    Find equivalent gate identities from the current circuit with Muls:
    >>> generate_equivalent_ids(x*x, return_as_muls=True)
    set([1])
    >>> generate_equivalent_ids(x*y*z, return_as_muls=True)
    set([X(0)*Y(0)*Z(0), X(0)*Z(0)*Y(0), Y(0)*X(0)*Z(0),
         Y(0)*Z(0)*X(0), Z(0)*X(0)*Y(0), Z(0)*Y(0)*X(0)])
    """
    if isinstance(gate_seq, Number):
        return set([Integer(1)])
    elif isinstance(gate_seq, Mul):
        gate_seq = gate_seq.args
    # An identity corresponds to a gate rule with an empty circuit on one
    # side of the equality; collect the non-empty side of each such rule.
    eq_ids = set()
    for lhs, rhs in generate_gate_rules(gate_seq):
        if lhs == ():
            eq_ids.add(rhs)
        elif rhs == ():
            eq_ids.add(lhs)
    if return_as_muls:
        eq_ids = set(Mul(*id_seq) for id_seq in eq_ids)
    return eq_ids
class GateIdentity(Basic):
    """Wrapper class for circuits that reduce to a scalar value.
    A gate identity is a quantum circuit such that the product
    of the gates in the circuit is equal to a scalar value.
    For example, XYZ = i, where X, Y, Z are the Pauli gates and
    i is the imaginary value, is considered a gate identity.
    Parameters
    ==========
    args : Gate tuple
        A variable length tuple of Gates that form an identity.
    Examples
    ========
    Create a GateIdentity and look at its attributes:
    >>> from sympy.physics.quantum.identitysearch import GateIdentity
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> an_identity = GateIdentity(x, y, z)
    >>> an_identity.circuit
    X(0)*Y(0)*Z(0)
    >>> an_identity.equivalent_ids
    set([(X(0), Y(0), Z(0)), (X(0), Z(0), Y(0)), (Y(0), X(0), Z(0)),
    (Y(0), Z(0), X(0)), (Z(0), X(0), Y(0)), (Z(0), Y(0), X(0))])
    """
    def __new__(cls, *args):
        # args should be a tuple - a variable length argument list
        obj = Basic.__new__(cls, *args)
        # Cache the circuit product plus the derived rule/identity sets at
        # construction time, so the properties below are cheap attribute
        # reads instead of recomputations.
        obj._circuit = Mul(*args)
        obj._rules = generate_gate_rules(args)
        obj._eq_ids = generate_equivalent_ids(args)
        return obj
    @property
    def circuit(self):
        # The gate sequence as a single Mul expression.
        return self._circuit
    @property
    def gate_rules(self):
        # Set of gate rules derived from this identity (see generate_gate_rules).
        return self._rules
    @property
    def equivalent_ids(self):
        # Set of equivalent identities (see generate_equivalent_ids).
        return self._eq_ids
    @property
    def sequence(self):
        # The original variable-length Gate tuple passed to the constructor.
        return self.args
    def __str__(self):
        """Returns the string of gates in a tuple."""
        return str(self.circuit)
def is_degenerate(identity_set, gate_identity):
    """Checks if a gate identity is a permutation of another identity.
    Parameters
    ==========
    identity_set : set
        A Python set with GateIdentity objects.
    gate_identity : GateIdentity
        The GateIdentity to check for existence in the set.
    Examples
    ========
    Check if the identity is a permutation of another identity:
    >>> from sympy.physics.quantum.identitysearch import (
    ...     GateIdentity, is_degenerate)
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> an_identity = GateIdentity(x, y, z)
    >>> id_set = set([an_identity])
    >>> another_id = (y, z, x)
    >>> is_degenerate(id_set, another_id)
    True
    >>> another_id = (x, x)
    >>> is_degenerate(id_set, another_id)
    False
    """
    # The candidate is degenerate when it appears among the equivalent
    # permutations of any identity already in the set.
    return any(gate_identity in known.equivalent_ids
               for known in identity_set)
def is_reducible(circuit, nqubits, begin, end):
    """Determines if a circuit is reducible by checking
    if its subcircuits are scalar values.
    Parameters
    ==========
    circuit : Gate tuple
        A tuple of Gates representing a circuit. The circuit to check
        if a gate identity is contained in a subcircuit.
    nqubits : int
        The number of qubits the circuit operates on.
    begin : int
        The leftmost gate in the circuit to include in a subcircuit.
    end : int
        The rightmost gate in the circuit to include in a subcircuit.
    Examples
    ========
    Check if the circuit can be reduced:
    >>> from sympy.physics.quantum.identitysearch import (
    ...     GateIdentity, is_reducible)
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> is_reducible((x, y, z), 1, 0, 3)
    True
    Check if an interval in the circuit can be reduced:
    >>> is_reducible((x, y, z), 1, 1, 3)
    False
    >>> is_reducible((x, y, y), 1, 1, 3)
    True
    """
    # Grow a suffix of circuit[begin:end] one gate at a time, from the
    # right end leftwards, testing each suffix for scalar reduction.
    subcircuit = ()
    for pos in range(end - 1, begin - 1, -1):
        subcircuit = (circuit[pos],) + subcircuit
        if is_scalar_matrix(subcircuit, nqubits, False):
            return True
    return False
def bfs_identity_search(gate_list, nqubits, max_depth=None,
       identity_only=False):
    """Constructs a set of gate identities from the list of possible gates.
    Performs a breadth first search over the space of gate identities.
    This allows the finding of the shortest gate identities first.
    Parameters
    ==========
    gate_list : list, Gate
        A list of Gates from which to search for gate identities.
    nqubits : int
        The number of qubits the quantum circuit operates on.
    max_depth : int
        The longest quantum circuit to construct from gate_list.
    identity_only : bool
        True to search for gate identities that reduce to identity;
        False to search for gate identities that reduce to a scalar.
    Examples
    ========
    Find a list of gate identities:
    >>> from sympy.physics.quantum.identitysearch import bfs_identity_search
    >>> from sympy.physics.quantum.gate import X, Y, Z, H
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> bfs_identity_search([x], 1, max_depth=2)
    set([GateIdentity(X(0), X(0))])
    >>> bfs_identity_search([x, y, z], 1)
    set([GateIdentity(X(0), X(0)), GateIdentity(Y(0), Y(0)),
    GateIdentity(Z(0), Z(0)), GateIdentity(X(0), Y(0), Z(0))])
    Find a list of identities that only equal to 1:
    >>> bfs_identity_search([x, y, z], 1, identity_only=True)
    set([GateIdentity(X(0), X(0)), GateIdentity(Y(0), Y(0)),
    GateIdentity(Z(0), Z(0))])
    """
    # A non-positive or missing max_depth defaults to the gate-list length.
    if max_depth is None or max_depth <= 0:
        max_depth = len(gate_list)
    id_only = identity_only
    # Start with an empty sequence (implicitly contains an IdentityGate)
    queue = deque([()])
    # Create an empty set of gate identities
    ids = set()
    # Begin searching for gate identities in given space.
    while (len(queue) > 0):
        current_circuit = queue.popleft()
        for next_gate in gate_list:
            # Extend the current circuit by one gate (BFS over lengths).
            new_circuit = current_circuit + (next_gate,)
            # Determines if a (strict) subcircuit is a scalar matrix
            circuit_reducible = is_reducible(new_circuit, nqubits,
                                             1, len(new_circuit))
            # In many cases when the matrix is a scalar value,
            # the evaluated matrix will actually be an integer
            # Record the circuit only when it reduces to a scalar, is not a
            # permutation of a known identity, and contains no smaller
            # identity within it.
            if (is_scalar_matrix(new_circuit, nqubits, id_only) and
                not is_degenerate(ids, new_circuit) and
                not circuit_reducible):
                ids.add(GateIdentity(*new_circuit))
            # Only keep exploring circuits that are still extendable and
            # contain no reducible subcircuit.
            elif (len(new_circuit) < max_depth and
                  not circuit_reducible):
                queue.append(new_circuit)
    return ids
def random_identity_search(gate_list, numgates, nqubits):
    """Randomly selects numgates from gate_list and checks if it is
    a gate identity.
    If the circuit is a gate identity, the circuit is returned;
    Otherwise, None is returned.
    """
    # Draw numgates gates uniformly at random, with replacement.
    last = len(gate_list) - 1
    circuit = tuple(gate_list[randint(0, last)] for _ in range(numgates))
    # Keep the circuit only when it reduces to a scalar matrix.
    return circuit if is_scalar_matrix(circuit, nqubits, False) else None
|
Droid-Concepts/DC-Elite_kernel_jf | refs/heads/dc43 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# Futex operation codes and flag bits, mirroring <linux/futex.h>.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the flag bits, leaving the bare futex command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000  # nanoseconds per second
def avg(total, n):
    """Return the mean of *total* over *n* samples.
    NOTE(review): under Python 2 this is floor division when both
    arguments are ints -- confirm whether callers ever rely on the
    truncation before porting.
    """
    return total / n
def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into one nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
    """Return the whole-second part of a nanosecond count.
    Python 2 integer division: truncates for int input; under Python 3
    this expression would yield a float instead.
    """
    return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    """Return the sub-second remainder (in ns) of a nanosecond count."""
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond count as a 'seconds.nanoseconds' string.

    Fixes two defects in the original: a stray trailing comma made the
    function return a 1-tuple instead of a string (which only worked by
    accident with single-'%s' formatting), and the local variable
    shadowed the builtin ``str``.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Accumulate (min, max, running-average, count) statistics for *key*.

    The entry is created on first use.  The "average" is a smoothed
    running value (midpoint of the previous average and the new sample),
    not a true arithmetic mean -- preserved from the original behaviour.

    Note: the parameter name ``dict`` shadows the builtin but is kept
    unchanged for interface compatibility with existing callers.
    """
    # 'has_key' was removed in Python 3; 'in' is equivalent on both.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        # Smoothed update, as in the original (not a true mean).
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    """Clear the terminal: ANSI 'cursor home' (ESC[H) + 'erase display' (ESC[2J)."""
    print("\x1b[H\x1b[2J")
# Flag so the "install audit-libs-python" hint is printed at most once.
audit_package_warned = False
try:
    import audit
    # Map uname(2) machine names to audit machine-type constants so that
    # syscall numbers can be translated for the running architecture.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is not present in every audit-libs-python release.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit module missing (or unknown machine): syscall_name() falls back
    # to numeric strings via the NameError on 'machine_id'.
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Translate syscall number *id* to its name for the current machine.

    Falls back to the plain numeric string when the audit module is not
    available (a NameError on ``audit``/``machine_id``) or the lookup
    fails for any other reason.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # Narrowed from a bare 'except:', which would also swallow
        # KeyboardInterrupt/SystemExit; Exception still covers the
        # intentional NameError fallback when audit is missing.
        return str(id)
def strerror(nr):
    """Return the symbolic errno name (e.g. 'ENOENT') for *nr*.

    Accepts either sign; unknown values yield 'Unknown <nr> errno'.
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare 'except:': only an unknown errno number
        # should trigger the fallback message.
        return "Unknown %d errno" % nr
|
idrogeno/enigma2 | refs/heads/master | lib/python/Screens/FactoryReset.py | 51 | from Screens.MessageBox import MessageBox
from boxbranding import getMachineBrand, getMachineName
class FactoryReset(MessageBox):
    """Yes/no confirmation dialog shown before a factory reset.
    Warns that ALL configuration data will be lost; the default answer
    is 'no' so an accidental OK cannot wipe the box.
    """
    def __init__(self, session):
        MessageBox.__init__(self, session, _("When you do a factory reset, you will lose ALL your configuration data\n"
            "(including bouquets, services, satellite data ...)\n"
            "After completion of factory reset, your %s %s will restart automatically!\n\n"
            "Really do a factory reset?") % (getMachineBrand(), getMachineName()), MessageBox.TYPE_YESNO, default = False)
        self.setTitle(_("Factory reset"))
        # Reuse the plain MessageBox skin rather than a per-class one.
        self.skinName = "MessageBox"
|
crackerhead/nemio | refs/heads/master | lib/python2.7/site-packages/setuptools/command/install_scripts.py | 285 | from distutils.command.install_scripts import install_scripts \
as _install_scripts
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log
class install_scripts(_install_scripts):
    """Do normal script install, plus any egg_info wrapper scripts"""
    def initialize_options(self):
        _install_scripts.initialize_options(self)
        # When True, skip generation of entry-point wrapper scripts
        # (set when installing into an .egg file).
        self.no_ep = False
    def run(self):
        # Imported lazily to avoid import cycles with easy_install.
        from setuptools.command.easy_install import get_script_args
        from setuptools.command.easy_install import sys_executable
        self.run_command("egg_info")
        if self.distribution.scripts:
            _install_scripts.run(self)  # run first to set up self.outfiles
        else:
            self.outfiles = []
        if self.no_ep:
            # don't install entry point scripts into .egg file!
            return
        ei_cmd = self.get_finalized_command("egg_info")
        # Describe the project being installed so get_script_args can
        # resolve its console/gui entry points.
        dist = Distribution(
            ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
            ei_cmd.egg_name, ei_cmd.egg_version,
        )
        bs_cmd = self.get_finalized_command('build_scripts')
        # Prefer the interpreter chosen by build_scripts, if any.
        executable = getattr(bs_cmd,'executable',sys_executable)
        is_wininst = getattr(
            self.get_finalized_command("bdist_wininst"), '_is_running', False
        )
        for args in get_script_args(dist, executable, is_wininst):
            self.write_script(*args)
    def write_script(self, script_name, contents, mode="t", *ignored):
        """Write an executable file to the scripts directory"""
        from setuptools.command.easy_install import chmod, current_umask
        log.info("Installing %s script to %s", script_name, self.install_dir)
        target = os.path.join(self.install_dir, script_name)
        self.outfiles.append(target)
        mask = current_umask()
        if not self.dry_run:
            ensure_directory(target)
            f = open(target,"w"+mode)
            f.write(contents)
            f.close()
            chmod(target, 0x1FF-mask)  # 0777 & ~umask
|
youdonghai/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/template/response.py | 71 | from django.http import HttpResponse
from django.template import loader, Context, RequestContext
class ContentNotRenderedError(Exception):
    """Raised when response content is accessed or iterated before render()."""
    pass
class SimpleTemplateResponse(HttpResponse):
    """An HttpResponse whose body is produced lazily from a template and
    context via an explicit render() step."""
    def __init__(self, template, context=None, mimetype=None, status=None,
                 content_type=None):
        # It would seem obvious to call these next two members 'template' and
        # 'context', but those names are reserved as part of the test Client API.
        # To avoid the name collision, we use different names; reusing the
        # reserved names would lead to tricky-to-debug problems.
        self.template_name = template
        self.context_data = context
        # _is_rendered tracks whether the template and context has been baked into
        # a final response.
        self._is_rendered = False
        # content argument doesn't make sense here because it will be replaced
        # with rendered template so we always pass empty string in order to
        # prevent errors and provide shorter signature.
        super(SimpleTemplateResponse, self).__init__('', mimetype, status,
                                                     content_type)
    def resolve_template(self, template):
        "Accepts a template object, path-to-template or list of paths"
        if isinstance(template, (list, tuple)):
            return loader.select_template(template)
        elif isinstance(template, basestring):
            return loader.get_template(template)
        else:
            # Already a template object; use it as-is.
            return template
    def resolve_context(self, context):
        """Convert context data into a full Context object
        (assuming it isn't already a Context object).
        """
        if isinstance(context, Context):
            return context
        else:
            return Context(context)
    @property
    def rendered_content(self):
        """Returns the freshly rendered content for the template and context
        described by the TemplateResponse.
        This *does not* set the final content of the response. To set the
        response content, you must either call render(), or set the
        content explicitly using the value of this property.
        """
        template = self.resolve_template(self.template_name)
        context = self.resolve_context(self.context_data)
        content = template.render(context)
        return content
    def render(self):
        """Render (thereby finalizing) the content of the response.
        If the content has already been rendered, this is a no-op.
        Returns the baked response instance.
        """
        if not self._is_rendered:
            self._set_content(self.rendered_content)
        return self
    # Read-only public view of the rendered state.
    is_rendered = property(lambda self: self._is_rendered)
    def __iter__(self):
        # Guard: streaming the body before render() would send empty content.
        if not self._is_rendered:
            raise ContentNotRenderedError('The response content must be rendered before it can be iterated over.')
        return super(SimpleTemplateResponse, self).__iter__()
    def _get_content(self):
        if not self._is_rendered:
            raise ContentNotRenderedError('The response content must be rendered before it can be accessed.')
        return super(SimpleTemplateResponse, self)._get_content()
    def _set_content(self, value):
        "Overrides rendered content, unless you later call render()"
        super(SimpleTemplateResponse, self)._set_content(value)
        self._is_rendered = True
    content = property(_get_content, _set_content)
class TemplateResponse(SimpleTemplateResponse):
    """SimpleTemplateResponse that renders with a RequestContext built
    from the request it was constructed with."""
    def __init__(self, request, template, context=None, mimetype=None,
                 status=None, content_type=None):
        # self.request gets over-written by django.test.client.Client - and
        # unlike context_data and template_name the _request should not
        # be considered part of the public API.
        self._request = request
        super(TemplateResponse, self).__init__(
            template, context, mimetype, status, content_type)
    def resolve_context(self, context):
        """Convert context data into a full RequestContext object
        (assuming it isn't already a Context object).
        """
        if isinstance(context, Context):
            return context
        else:
            # Bind context processors / request data via RequestContext.
            return RequestContext(self._request, context)
|
NhuanTDBK/Kaggle_StackedOverflow | refs/heads/master | is13/utils/tools.py | 3 | import random
def shuffle(lol, seed):
    """Shuffle every list in *lol* in place, all in the same order.

    Re-seeding the RNG before each shuffle guarantees that every list is
    permuted identically, which keeps parallel lists aligned.
    """
    for seq in lol:
        random.seed(seed)
        random.shuffle(seq)
def minibatch(l, bs):
    """
    l :: list of word idxs
    return a list of minibatches of indexes
    which size is equal to bs
    border cases are treated as follow:
    eg: [0,1,2,3] and bs = 3
    will output:
    [[0],[0,1],[0,1,2],[1,2,3]]
    """
    # 'range' replaces Python-2-only 'xrange'; iteration behaviour is
    # identical on both interpreters.
    # Growing prefixes until a full window of size bs is available ...
    out = [l[:i] for i in range(1, min(bs, len(l) + 1))]
    # ... then sliding windows of exactly bs elements.
    out += [l[i - bs:i] for i in range(bs, len(l) + 1)]
    assert len(l) == len(out)
    return out
def contextwin(l, win):
    """
    win :: int corresponding to the size of the window
    given a list of indexes composing a sentence
    it will return a list of list of indexes corresponding
    to context windows surrounding each word in the sentence
    """
    assert (win % 2) == 1
    assert win >= 1
    l = list(l)
    # Floor division keeps the padding width an int under Python 3 as
    # well ('win/2' would be a float there, breaking list repetition);
    # for positive ints it is identical to the original Python 2 '/'.
    pad = win // 2
    lpadded = pad * [-1] + l + pad * [-1]
    out = [lpadded[i:i + win] for i in range(len(l))]
    assert len(out) == len(l)
    return out
|
jaafarbarek/pyrtos | refs/heads/master | micropython-1.5/tests/bench/arrayop-1-list_inplace.py | 101 | # Array operation
# Type: list, inplace operation using for. What's good about this
# method is that it doesn't require any extra memory allocation.
import bench
def test(num):
    """Benchmark: allocate a 1000-entry list and increment each element
    in place via indexed access (no extra allocation per element).
    """
    # iter() wraps range explicitly -- presumably to force generic
    # iterator dispatch in the benchmark; confirm against sibling
    # arrayop benchmarks before changing.
    for i in iter(range(num//10000)):
        arr = [0] * 1000
        for i in range(len(arr)):
            arr[i] += 1
bench.run(test)
|
SMTorg/smt | refs/heads/master | smt/utils/test/test_kriging_utils.py | 2 | import unittest
import numpy as np
from smt.utils.sm_test_case import SMTestCase
from smt.utils.kriging_utils import standardization
from smt.sampling_methods import LHS
class Test(SMTestCase):
    # Unit tests for smt.utils.kriging_utils helpers.
    def test_standardization(self):
        """standardization(..., scale_X_to_unit=True) must map the sample
        matrix exactly onto [0, 1] (min -> 0, max -> 1)."""
        d, n = (10, 100)
        # Latin-hypercube sample of n points in d dimensions on [0, 1];
        # fixed random_state keeps the test deterministic.
        sx = LHS(
            xlimits=np.repeat(np.atleast_2d([0.0, 1.0]), d, axis=0),
            criterion="m",
            random_state=42,
        )
        X = sx(n)
        # Matching 1-D sample for the outputs.
        sy = LHS(
            xlimits=np.repeat(np.atleast_2d([0.0, 1.0]), 1, axis=0),
            criterion="m",
            random_state=42,
        )
        y = sy(n)
        # Only the scaled X matters here; the remaining outputs are ignored.
        X_norm, _, _, _, _, _ = standardization(X, y, scale_X_to_unit=True)
        interval = (np.min(X_norm), np.max(X_norm))
        self.assertEqual((0, 1), interval)
if __name__ == "__main__":
unittest.main()
|
pgexperts/patroni | refs/heads/master | patroni/__init__.py | 2 | import logging
import os
import sys
import time
import yaml
from patroni.api import RestApiServer
from patroni.etcd import Etcd
from patroni.ha import Ha
from patroni.postgresql import Postgresql
from patroni.utils import setup_signal_handlers, reap_children
from patroni.zookeeper import ZooKeeper
logger = logging.getLogger(__name__)
class Patroni:
    """Wires together the local PostgreSQL instance, the distributed
    configuration store (DCS), the REST API server and the HA state
    machine, and drives the main loop."""
    def __init__(self, config):
        # Seconds between HA loop iterations.
        self.nap_time = config['loop_wait']
        self.tags = config.get('tags', dict())
        self.postgresql = Postgresql(config['postgresql'])
        self.dcs = self.get_dcs(self.postgresql.name, config)
        self.api = RestApiServer(self, config['restapi'])
        self.ha = Ha(self)
        self.next_run = time.time()
    @property
    def nofailover(self):
        # True when this node is tagged to never be promoted to leader.
        return self.tags.get('nofailover', False)
    @staticmethod
    def get_dcs(name, config):
        """Instantiate the DCS implementation selected by the config.
        'etcd' takes precedence if both sections are present; raises when
        neither is configured.
        """
        if 'etcd' in config:
            return Etcd(name, config['etcd'])
        if 'zookeeper' in config:
            return ZooKeeper(name, config['zookeeper'])
        raise Exception('Can not find suitable configuration of distributed configuration store')
    def schedule_next_run(self):
        """Advance next_run by nap_time and wait (via dcs.watch) until due."""
        self.next_run += self.nap_time
        current_time = time.time()
        nap_time = self.next_run - current_time
        if nap_time <= 0:
            # Running behind schedule: start the next cycle immediately.
            self.next_run = current_time
        elif self.dcs.watch(nap_time):
            # watch() returned truthy before the timeout -- presumably a
            # DCS change was observed; confirm against the DCS classes.
            self.next_run = time.time()
    def run(self):
        """Run the HA loop forever: one ha cycle per iteration, reaping
        child processes and sleeping between cycles."""
        self.api.start()
        self.next_run = time.time()
        while True:
            logger.info(self.ha.run_cycle())
            reap_children()
            self.schedule_next_run()
def main():
    """Entry point: configure logging, load the YAML config file given on
    the command line and run Patroni until interrupted, cleaning up the
    API server, PostgreSQL and the DCS leader key on exit."""
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO)
    logging.getLogger('requests').setLevel(logging.WARNING)
    setup_signal_handlers()
    if len(sys.argv) < 2 or not os.path.isfile(sys.argv[1]):
        print('Usage: {} config.yml'.format(sys.argv[0]))
        return
    with open(sys.argv[1], 'r') as f:
        # safe_load: the config is plain YAML, and yaml.load without an
        # explicit Loader can instantiate arbitrary Python objects from
        # untrusted input (and is deprecated in PyYAML >= 5.1).
        config = yaml.safe_load(f)
    patroni = Patroni(config)
    try:
        patroni.run()
    except KeyboardInterrupt:
        pass
    finally:
        patroni.api.shutdown()
        patroni.postgresql.stop()
        patroni.dcs.delete_leader()
|
erikdejonge/youtube-dl | refs/heads/master | youtube_dl/extractor/newgrounds.py | 28 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
parse_duration,
parse_filesize,
unified_timestamp,
)
class NewgroundsIE(InfoExtractor):
    """Extracts a single audio ('audio/listen') or video ('portal/view')
    submission from newgrounds.com."""
    _VALID_URL = r'https?://(?:www\.)?newgrounds\.com/(?:audio/listen|portal/view)/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.newgrounds.com/audio/listen/549479',
        'md5': 'fe6033d297591288fa1c1f780386f07a',
        'info_dict': {
            'id': '549479',
            'ext': 'mp3',
            'title': 'B7 - BusMode',
            'uploader': 'Burn7',
            'timestamp': 1378878540,
            'upload_date': '20130911',
            'duration': 143,
        },
    }, {
        'url': 'https://www.newgrounds.com/portal/view/673111',
        'md5': '3394735822aab2478c31b1004fe5e5bc',
        'info_dict': {
            'id': '673111',
            'ext': 'mp4',
            'title': 'Dancin',
            'uploader': 'Squirrelman82',
            'timestamp': 1460256780,
            'upload_date': '20160410',
        },
    }, {
        # source format unavailable, additional mp4 formats
        'url': 'http://www.newgrounds.com/portal/view/689400',
        'info_dict': {
            'id': '689400',
            'ext': 'mp4',
            'title': 'ZTV News Episode 8',
            'uploader': 'BennettTheSage',
            'timestamp': 1487965140,
            'upload_date': '20170224',
        },
        'params': {
            'skip_download': True,
        },
    }]
    def _real_extract(self, url):
        media_id = self._match_id(url)
        webpage = self._download_webpage(url, media_id)
        title = self._html_search_regex(
            r'<title>([^>]+)</title>', webpage, 'title')
        # The media URL is embedded in the page as a JSON string literal.
        media_url = self._parse_json(self._search_regex(
            r'"url"\s*:\s*("[^"]+"),', webpage, ''), media_id)
        formats = [{
            'url': media_url,
            'format_id': 'source',
            'quality': 1,
        }]
        max_resolution = int_or_none(self._search_regex(
            r'max_resolution["\']\s*:\s*(\d+)', webpage, 'max resolution',
            default=None))
        if max_resolution:
            # Derive additional MP4 renditions from the source URL, up to
            # the page-advertised maximum resolution.
            url_base = media_url.rpartition('.')[0]
            for resolution in (360, 720, 1080):
                if resolution > max_resolution:
                    break
                formats.append({
                    'url': '%s.%dp.mp4' % (url_base, resolution),
                    'format_id': '%dp' % resolution,
                    'height': resolution,
                })
        # Drop any derived formats that do not actually exist on the server.
        self._check_formats(formats, media_id)
        self._sort_formats(formats)
        uploader = self._html_search_regex(
            (r'(?s)<h4[^>]*>(.+?)</h4>.*?<em>\s*Author\s*</em>',
             r'(?:Author|Writer)\s*<a[^>]+>([^<]+)'), webpage, 'uploader',
            fatal=False)
        timestamp = unified_timestamp(self._html_search_regex(
            (r'<dt>\s*Uploaded\s*</dt>\s*<dd>([^<]+</dd>\s*<dd>[^<]+)',
             r'<dt>\s*Uploaded\s*</dt>\s*<dd>([^<]+)'), webpage, 'timestamp',
            default=None))
        duration = parse_duration(self._search_regex(
            r'(?s)<dd>\s*Song\s*</dd>\s*<dd>.+?</dd>\s*<dd>([^<]+)', webpage,
            'duration', default=None))
        filesize_approx = parse_filesize(self._html_search_regex(
            r'(?s)<dd>\s*Song\s*</dd>\s*<dd>(.+?)</dd>', webpage, 'filesize',
            default=None))
        if len(formats) == 1:
            formats[0]['filesize_approx'] = filesize_approx
        if '<dd>Song' in webpage:
            # Audio submission: mark the single source format as audio-only.
            formats[0]['vcodec'] = 'none'
        return {
            'id': media_id,
            'title': title,
            'uploader': uploader,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
        }
class NewgroundsPlaylistIE(InfoExtractor):
    """Extracts Newgrounds collection and search-result pages as playlists
    of NewgroundsIE entries."""
    _VALID_URL = r'https?://(?:www\.)?newgrounds\.com/(?:collection|[^/]+/search/[^/]+)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.newgrounds.com/collection/cats',
        'info_dict': {
            'id': 'cats',
            'title': 'Cats',
        },
        'playlist_mincount': 46,
    }, {
        'url': 'http://www.newgrounds.com/portal/search/author/ZONE-SAMA',
        'info_dict': {
            'id': 'ZONE-SAMA',
            'title': 'Portal Search: ZONE-SAMA',
        },
        'playlist_mincount': 47,
    }, {
        'url': 'http://www.newgrounds.com/audio/search/title/cats',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        title = self._search_regex(
            r'<title>([^>]+)</title>', webpage, 'title', default=None)
        # cut left menu (its links would otherwise pollute the entry list)
        webpage = self._search_regex(
            r'(?s)<div[^>]+\bclass=["\']column wide(.+)',
            webpage, 'wide column', default=webpage)
        entries = []
        for a, path, media_id in re.findall(
                r'(<a[^>]+\bhref=["\']/?((?:portal/view|audio/listen)/(\d+))[^>]+>)',
                webpage):
            # Keep only real submission links, not navigation anchors.
            a_class = extract_attributes(a).get('class')
            if a_class not in ('item-portalsubmission', 'item-audiosubmission'):
                continue
            entries.append(
                self.url_result(
                    'https://www.newgrounds.com/%s' % path,
                    ie=NewgroundsIE.ie_key(), video_id=media_id))
        return self.playlist_result(entries, playlist_id, title)
|
vrsys/avangong | refs/heads/master | avango-connect/python/avango/connect/_SFVec4.py | 6 | # -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
import avango.osg
from _registry import _register_field
class SFVec4Descriptor(object):
    """Simple stream (de)serialization support for avango.osg.SFVec4.
    Values are written as NUL-separated decimal strings: a leading NUL
    followed by the four components x, y, z, w.
    """
    key = "SFVec4"
    def write(self, field, hout):
        # Nothing is written when field.value is falsy.
        # NOTE(review): a zero vector might also be falsy here -- confirm
        # Vec4 truthiness semantics before relying on this guard.
        if field.value:
            hout.write('\x00')
            value = []
            v = field.value
            value.append(str(v.x))
            value.append(str(v.y))
            value.append(str(v.z))
            value.append(str(v.w))
            hout.write('\x00'.join(value))
    def read(self, line):
        field = avango.osg.SFVec4()
        # 'line' is treated as a flat sequence of component strings;
        # consecutive groups of four become Vec4 values.
        # NOTE(review): SFVec4 is single-valued, so if 'line' ever holds
        # more than one group only the LAST Vec4 is kept -- confirm inputs
        # always contain exactly four components.
        for x in zip(line[::4], line[1::4], line[2::4], line[3::4]):
            vec = avango.osg.Vec4(float(x[0]), float(x[1]), float(x[2]), float(x[3]))
            field.value = vec
        return field
_register_field(avango.osg.SFVec4, SFVec4Descriptor())
|
sorenk/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_network.py | 20 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_network
short_description: Manages networks on Apache CloudStack based clouds.
description:
- Create, update, restart and delete networks.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name (case sensitive) of the network.
required: true
display_text:
description:
- Display text of the network.
- If not specified, C(name) will be used as C(display_text).
network_offering:
description:
- Name of the offering for the network.
- Required if C(state=present).
start_ip:
description:
- The beginning IPv4 address of the network belongs to.
- Only considered on create.
end_ip:
description:
- The ending IPv4 address of the network belongs to.
- If not specified, value of C(start_ip) is used.
- Only considered on create.
gateway:
description:
- The gateway of the network.
- Required for shared networks and isolated networks when it belongs to a VPC.
- Only considered on create.
netmask:
description:
- The netmask of the network.
- Required for shared networks and isolated networks when it belongs to a VPC.
- Only considered on create.
start_ipv6:
description:
- The beginning IPv6 address of the network belongs to.
- Only considered on create.
end_ipv6:
description:
- The ending IPv6 address of the network belongs to.
- If not specified, value of C(start_ipv6) is used.
- Only considered on create.
cidr_ipv6:
description:
- CIDR of IPv6 network, must be at least /64.
- Only considered on create.
gateway_ipv6:
description:
- The gateway of the IPv6 network.
- Required for shared networks.
- Only considered on create.
vlan:
description:
- The ID or VID of the network.
vpc:
description:
- Name of the VPC of the network.
isolated_pvlan:
description:
- The isolated private VLAN for this network.
clean_up:
description:
- Cleanup old network elements.
- Only considered on C(state=restarted).
default: no
type: bool
acl_type:
description:
- Access control type for the VPC network tier.
- Only considered on create.
default: account
choices: [ account, domain ]
acl:
description:
- The name of the access control list for the VPC network tier.
version_added: "2.5"
subdomain_access:
description:
- Defines whether to allow subdomains to use networks dedicated to their parent domain(s).
- Should be used with C(acl_type=domain).
- Only considered on create.
type: bool
version_added: "2.5"
network_domain:
description:
- The network domain.
state:
description:
- State of the network.
default: present
choices: [ present, absent, restarted ]
zone:
description:
- Name of the zone in which the network should be deployed.
- If not set, default zone is used.
project:
description:
- Name of the project the network to be deployed in.
domain:
description:
- Domain the network is related to.
account:
description:
- Account the network is related to.
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
extends_documentation_fragment: cloudstack
'''
# Usage examples rendered by ansible-doc (YAML inside a Python string).
EXAMPLES = '''
- name: Create a network
  local_action:
    module: cs_network
    name: my network
    zone: gva-01
    network_offering: DefaultIsolatedNetworkOfferingWithSourceNatService
    network_domain: example.com

- name: Create a VPC tier
  local_action:
    module: cs_network
    name: my VPC tier 1
    zone: gva-01
    vpc: my VPC
    network_offering: DefaultIsolatedNetworkOfferingForVpcNetworks
    gateway: 10.43.0.1
    netmask: 255.255.255.0
    acl: my web acl

- name: Update a network
  local_action:
    module: cs_network
    name: my network
    display_text: network of domain example.local
    network_domain: example.local

- name: Restart a network with clean up
  local_action:
    module: cs_network
    name: my network
    clean_up: yes
    state: restarted

- name: Remove a network
  local_action:
    module: cs_network
    name: my network
    state: absent
'''
RETURN = '''
---
id:
description: UUID of the network.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the network.
returned: success
type: string
sample: web project
display_text:
description: Display text of the network.
returned: success
type: string
sample: web project
dns1:
description: IP address of the 1st nameserver.
returned: success
type: string
sample: 1.2.3.4
dns2:
description: IP address of the 2nd nameserver.
returned: success
type: string
sample: 1.2.3.4
cidr:
description: IPv4 network CIDR.
returned: success
type: string
sample: 10.101.64.0/24
gateway:
description: IPv4 gateway.
returned: success
type: string
sample: 10.101.64.1
netmask:
description: IPv4 netmask.
returned: success
type: string
sample: 255.255.255.0
cidr_ipv6:
description: IPv6 network CIDR.
returned: success
type: string
sample: 2001:db8::/64
gateway_ipv6:
description: IPv6 gateway.
returned: success
type: string
sample: 2001:db8::1
zone:
description: Name of zone.
returned: success
type: string
sample: ch-gva-2
domain:
description: Domain the network is related to.
returned: success
type: string
sample: ROOT
account:
description: Account the network is related to.
returned: success
type: string
sample: example account
project:
description: Name of project.
returned: success
type: string
sample: Production
tags:
description: List of resource tags associated with the network.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
acl_type:
description: Access type of the VPC network tier (Domain, Account).
returned: success
type: string
sample: Account
acl:
description: Name of the access control list for the VPC network tier.
returned: success
type: string
sample: My ACL
version_added: "2.5"
acl_id:
description: ID of the access control list for the VPC network tier.
returned: success
type: string
sample: dfafcd55-0510-4b8c-b6c5-b8cedb4cfd88
version_added: "2.5"
broadcast_domain_type:
description: Broadcast domain type of the network.
returned: success
type: string
sample: Vlan
type:
description: Type of the network.
returned: success
type: string
sample: Isolated
traffic_type:
description: Traffic type of the network.
returned: success
type: string
sample: Guest
state:
description: State of the network (Allocated, Implemented, Setup).
returned: success
type: string
sample: Allocated
is_persistent:
description: Whether the network is persistent or not.
returned: success
type: boolean
sample: false
network_domain:
description: The network domain
returned: success
type: string
sample: example.local
network_offering:
description: The network offering name.
returned: success
type: string
sample: DefaultIsolatedNetworkOfferingWithSourceNatService
network_offering_display_text:
description: The network offering display text.
returned: success
type: string
sample: Offering for Isolated Vpc networks with Source Nat service enabled
version_added: "2.5"
network_offering_conserve_mode:
description: Whether the network offering has IP conserve mode enabled or not.
returned: success
type: bool
sample: false
version_added: "2.5"
network_offering_availability:
description: The availability of the network offering the network is created from
returned: success
type: string
sample: Optional
version_added: "2.5"
is_system:
description: Whether the network is system related or not.
returned: success
type: bool
sample: false
version_added: "2.5"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackNetwork(AnsibleCloudStack):
    """Manage CloudStack networks: create, update, restart and delete,
    including VPC network tiers and their network ACLs."""

    def __init__(self, module):
        super(AnsibleCloudStackNetwork, self).__init__(module)
        # Map CloudStack API result keys to this module's return value keys.
        self.returns = {
            'networkdomain': 'network_domain',
            'networkofferingname': 'network_offering',
            'networkofferingdisplaytext': 'network_offering_display_text',
            'networkofferingconservemode': 'network_offering_conserve_mode',
            'networkofferingavailability': 'network_offering_availability',
            'aclid': 'acl_id',
            'issystem': 'is_system',
            'ispersistent': 'is_persistent',
            'acltype': 'acl_type',
            'type': 'type',
            'traffictype': 'traffic_type',
            'ip6gateway': 'gateway_ipv6',
            'ip6cidr': 'cidr_ipv6',
            'gateway': 'gateway',
            'cidr': 'cidr',
            'netmask': 'netmask',
            'broadcastdomaintype': 'broadcast_domain_type',
            'dns1': 'dns1',
            'dns2': 'dns2',
        }
        # Cache for get_network() lookups.
        self.network = None

    def get_network_acl(self, key=None, acl_id=None):
        """Return the network ACL list (or its `key` entry) looked up either
        by ID or by the module's 'acl' param; None when no ACL was given
        or nothing matched."""
        if acl_id is not None:
            args = {
                'id': acl_id,
                'vpcid': self.get_vpc(key='id'),
            }
        else:
            acl_name = self.module.params.get('acl')
            if not acl_name:
                return
            args = {
                'name': acl_name,
                'vpcid': self.get_vpc(key='id'),
            }
        network_acls = self.query_api('listNetworkACLLists', **args)
        if network_acls:
            acl = network_acls['networkacllist'][0]
            return self._get_by_key(key, acl)

    def get_network_offering(self, key=None):
        """Resolve the 'network_offering' param to an offering in the zone;
        fail the module when it is missing or not found."""
        network_offering = self.module.params.get('network_offering')
        if not network_offering:
            self.module.fail_json(msg="missing required arguments: network_offering")
        args = {
            'zoneid': self.get_zone(key='id')
        }
        network_offerings = self.query_api('listNetworkOfferings', **args)
        if network_offerings:
            for no in network_offerings['networkoffering']:
                # Accept name, display text or ID as the identifier.
                if network_offering in [no['name'], no['displaytext'], no['id']]:
                    return self._get_by_key(key, no)
        self.module.fail_json(msg="Network offering '%s' not found" % network_offering)

    def _get_args(self):
        """Arguments common to createNetwork and updateNetwork calls."""
        args = {
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'networkdomain': self.module.params.get('network_domain'),
            'networkofferingid': self.get_network_offering(key='id')
        }
        return args

    def get_network(self, refresh=False):
        """Return the existing network matched by name, display text or ID;
        result is cached unless `refresh` is set."""
        if not self.network or refresh:
            network = self.module.params.get('name')
            args = {
                'zoneid': self.get_zone(key='id'),
                'projectid': self.get_project(key='id'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'vpcid': self.get_vpc(key='id'),
            }
            networks = self.query_api('listNetworks', **args)
            if networks:
                for n in networks['network']:
                    if network in [n['name'], n['displaytext'], n['id']]:
                        self.network = n
                        self.network['acl'] = self.get_network_acl(key='name', acl_id=n.get('aclid'))
                        break
        return self.network

    def present_network(self):
        """Ensure the network exists and matches the given params."""
        if self.module.params.get('acl') is not None and self.module.params.get('vpc') is None:
            self.module.fail_json(msg="Missing required params: vpc")
        network = self.get_network()
        if not network:
            network = self.create_network(network)
        else:
            network = self.update_network(network)
        return network

    def update_network(self, network):
        """Update an existing network and reconcile its ACL."""
        args = self._get_args()
        args['id'] = network['id']
        if self.has_changed(args, network):
            self.result['changed'] = True
            if not self.module.check_mode:
                network = self.query_api('updateNetwork', **args)
                poll_async = self.module.params.get('poll_async')
                if network and poll_async:
                    network = self.poll_job(network, 'network')
        # Skip ACL check if the network is not a VPC tier
        # NOTE(review): when 'acl' is not specified but the network has an
        # aclid, this comparison still differs and would call
        # replaceNetworkACLList with aclid=None -- verify against the API.
        if network.get('aclid') != self.get_network_acl(key='id'):
            self.result['changed'] = True
            if not self.module.check_mode:
                args = {
                    'aclid': self.get_network_acl(key='id'),
                    'networkid': network['id'],
                }
                network = self.query_api('replaceNetworkACLList', **args)
                if self.module.params.get('poll_async'):
                    self.poll_job(network, 'networkacllist')
                network = self.get_network(refresh=True)
        return network

    def create_network(self, network):
        """Create the network (no API call in check mode)."""
        self.result['changed'] = True
        args = self._get_args()
        args.update({
            'acltype': self.module.params.get('acl_type'),
            'aclid': self.get_network_acl(key='id'),
            'zoneid': self.get_zone(key='id'),
            'projectid': self.get_project(key='id'),
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'startip': self.module.params.get('start_ip'),
            'endip': self.get_or_fallback('end_ip', 'start_ip'),
            'netmask': self.module.params.get('netmask'),
            'gateway': self.module.params.get('gateway'),
            'startipv6': self.module.params.get('start_ipv6'),
            'endipv6': self.get_or_fallback('end_ipv6', 'start_ipv6'),
            'ip6cidr': self.module.params.get('cidr_ipv6'),
            'ip6gateway': self.module.params.get('gateway_ipv6'),
            'vlan': self.module.params.get('vlan'),
            'isolatedpvlan': self.module.params.get('isolated_pvlan'),
            'subdomainaccess': self.module.params.get('subdomain_access'),
            'vpcid': self.get_vpc(key='id')
        })
        if not self.module.check_mode:
            res = self.query_api('createNetwork', **args)
            network = res['network']
        return network

    def restart_network(self):
        """Restart the network; only possible in implemented/setup state."""
        network = self.get_network()
        if not network:
            # Bug fix: params is a dict, not a callable -- was
            # self.module.params('name'), which raised TypeError instead of
            # producing the intended failure message.
            self.module.fail_json(msg="No network named '%s' found." % self.module.params.get('name'))
        # Restarting only available for these states
        if network['state'].lower() in ['implemented', 'setup']:
            self.result['changed'] = True
            args = {
                'id': network['id'],
                'cleanup': self.module.params.get('clean_up')
            }
            if not self.module.check_mode:
                network = self.query_api('restartNetwork', **args)
                poll_async = self.module.params.get('poll_async')
                if network and poll_async:
                    network = self.poll_job(network, 'network')
        return network

    def absent_network(self):
        """Remove the network if it exists."""
        network = self.get_network()
        if network:
            self.result['changed'] = True
            args = {
                'id': network['id']
            }
            if not self.module.check_mode:
                res = self.query_api('deleteNetwork', **args)
                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    self.poll_job(res, 'network')
        return network

    def get_result(self, network):
        """Build the module result dict, adding the resolved ACL name."""
        super(AnsibleCloudStackNetwork, self).get_result(network)
        if network:
            self.result['acl'] = self.get_network_acl(key='name', acl_id=network.get('aclid'))
        return self.result
def main():
    """Module entry point: build the argument spec, then apply the
    requested state (present / absent / restarted)."""
    spec = cs_argument_spec()
    spec.update(dict(
        name=dict(required=True),
        display_text=dict(),
        network_offering=dict(),
        zone=dict(),
        start_ip=dict(),
        end_ip=dict(),
        gateway=dict(),
        netmask=dict(),
        start_ipv6=dict(),
        end_ipv6=dict(),
        cidr_ipv6=dict(),
        gateway_ipv6=dict(),
        vlan=dict(),
        vpc=dict(),
        isolated_pvlan=dict(),
        clean_up=dict(type='bool', default=False),
        network_domain=dict(),
        subdomain_access=dict(type='bool'),
        state=dict(choices=['present', 'absent', 'restarted'], default='present'),
        acl=dict(),
        acl_type=dict(choices=['account', 'domain']),
        project=dict(),
        domain=dict(),
        account=dict(),
        poll_async=dict(type='bool', default=True),
    ))

    together = cs_required_together()
    together.append(['netmask', 'gateway'])

    module = AnsibleModule(
        argument_spec=spec,
        required_together=together,
        supports_check_mode=True,
    )

    acs_network = AnsibleCloudStackNetwork(module)

    # Dispatch on the requested state; anything but absent/restarted
    # means "ensure present".
    handlers = {
        'absent': acs_network.absent_network,
        'restarted': acs_network.restart_network,
    }
    action = handlers.get(module.params.get('state'), acs_network.present_network)
    network = action()

    module.exit_json(**acs_network.get_result(network))


if __name__ == '__main__':
    main()
|
fbidu/namebench | refs/heads/master | nb_third_party/jinja2/utils.py | 189 | # -*- coding: utf-8 -*-
"""
jinja2.utils
~~~~~~~~~~~~
Utility functions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import errno
try:
from thread import allocate_lock
except ImportError:
from dummy_thread import allocate_lock
from collections import deque
from itertools import imap
# Splits text on runs of whitespace, keeping the separators.
_word_split_re = re.compile(r'(\s+)')

# Matches optional leading/trailing punctuation around a word.  urlize()
# runs on *escaped* text, so angle brackets appear as the literal entities
# '&lt;' / '&gt;' and must be listed here as such.  (The previous tuples
# contained duplicated '<' / '>' entries where the entities had been
# mangled away.)
_punctuation_re = re.compile(
    '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
        '|'.join(imap(re.escape, ('(', '<', '&lt;'))),
        '|'.join(imap(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
    )
)
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_digits = '0123456789'
# special singleton representing missing values for the runtime
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()

# set of code objects registered as internal (see internalcode())
internal_code = set()

# concatenate a list of strings and convert them to unicode.
# unfortunately there is a bug in python 2.4 and lower that causes
# unicode.join to trash the traceback.
_concat = u''.join

try:
    # probe whether this interpreter exhibits the generator-join
    # traceback bug; the generator raises before yielding anything.
    def _test_gen_bug():
        raise TypeError(_test_gen_bug)
        yield None
    _concat(_test_gen_bug())
except TypeError, _error:
    if not _error.args or _error.args[0] is not _test_gen_bug:
        # buggy interpreter: materialize the generator as a list first
        def concat(gen):
            try:
                return _concat(list(gen))
            except:
                # this hack is needed so that the current frame
                # does not show up in the traceback.
                exc_type, exc_value, tb = sys.exc_info()
                raise exc_type, exc_value, tb.tb_next
    else:
        concat = _concat
    del _test_gen_bug, _error

# for python 2.x we create ourselves a next() function that does the
# basics without exception catching.
try:
    next = next
except NameError:
    def next(x):
        return x.next()

# if this python version is unable to deal with unicode filenames
# when passed to encode we let this function encode it properly.
# This is used in a couple of places.  As far as Jinja is concerned
# filenames are unicode *or* bytestrings in 2.x and unicode only in
# 3.x because compile cannot handle bytes
if sys.version_info < (3, 0):
    def _encode_filename(filename):
        if isinstance(filename, unicode):
            return filename.encode('utf-8')
        return filename
else:
    def _encode_filename(filename):
        assert filename is None or isinstance(filename, str), \
            'filenames must be strings'
        return filename

from keyword import iskeyword as is_python_keyword

# common types.  These do exist in the special types module too which however
# does not exist in IronPython out of the box.  Also that way we don't have
# to deal with implementation specific stuff here
class _C(object):
    def method(self): pass
def _func():
    yield None
FunctionType = type(_func)
GeneratorType = type(_func())
MethodType = type(_C.method)
CodeType = type(_C.method.func_code)
try:
    raise TypeError()
except TypeError:
    _tb = sys.exc_info()[2]
    TracebackType = type(_tb)
    FrameType = type(_tb.tb_frame)
# clean up the probe helpers so they don't leak into the module namespace
del _C, _tb, _func
def contextfunction(f):
    """Mark *f* as a context callable.

    A context callable is passed the active :class:`Context` as first
    argument when called from the template.  This is useful if a function
    wants access to the context or to helpers provided on the context
    object.  For example, a function that returns a sorted list of the
    template's exported variables could look like this::

        @contextfunction
        def get_exported_names(context):
            return sorted(context.exported_vars)
    """
    setattr(f, 'contextfunction', True)
    return f
def evalcontextfunction(f):
    """Mark *f* as an eval-context callable.

    Similar to :func:`contextfunction`, but instead of the context an
    evaluation context object is passed as first argument.  For more
    information about the eval context, see :ref:`eval-context`.

    .. versionadded:: 2.4
    """
    setattr(f, 'evalcontextfunction', True)
    return f
def environmentfunction(f):
    """Mark *f* as an environment callable.

    Works exactly like the :func:`contextfunction` decorator, except that
    the first argument is the active :class:`Environment` rather than
    the context.
    """
    setattr(f, 'environmentfunction', True)
    return f
def internalcode(f):
    """Marks the function as internally used"""
    # Register the function's code object in the module-level internal_code
    # set; f.func_code is the Python 2 spelling of f.__code__.  The set is
    # presumably consulted by Jinja's traceback machinery -- not visible in
    # this file.
    internal_code.add(f.func_code)
    return f
def is_undefined(obj):
    """Check if the object passed is undefined.  This does nothing more than
    performing an instance check against :class:`Undefined` but looks nicer.
    This can be used for custom filters or tests that want to react to
    undefined variables.  For example a custom default filter can look like
    this::

        def default(var, default=''):
            if is_undefined(var):
                return default
            return var
    """
    # Deferred import -- presumably avoids a circular dependency with
    # jinja2.runtime, which itself uses this module.
    from jinja2.runtime import Undefined
    return isinstance(obj, Undefined)
def consume(iterable):
    """Exhaust *iterable*, discarding every item it yields."""
    for _ in iterable:
        pass
def clear_caches():
    """Jinja2 keeps internal caches for environments and lexers.  These are
    used so that Jinja2 doesn't have to recreate environments and lexers all
    the time.  Normally you don't have to care about that but if you are
    measuring memory consumption you may want to clean the caches.
    """
    # Imported here rather than at module level -- presumably to avoid
    # import cycles with the environment/lexer modules.
    from jinja2.environment import _spontaneous_environments
    from jinja2.lexer import _lexer_cache
    _spontaneous_environments.clear()
    _lexer_cache.clear()
def import_string(import_name, silent=False):
    """Imports an object based on a string.  This is useful if you want to
    use import paths as endpoints or something similar.  An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If the `silent` is True the return value will be `None` if the import
    fails.

    :return: imported object
    """
    try:
        if ':' in import_name:
            module_name, obj_name = import_name.split(':', 1)
        elif '.' in import_name:
            module_name, obj_name = import_name.rsplit('.', 1)
        else:
            # A bare module name: import and return the module itself.
            return __import__(import_name)
        return getattr(__import__(module_name, None, None, [obj_name]), obj_name)
    except (ImportError, AttributeError):
        if not silent:
            raise
def open_if_exists(filename, mode='rb'):
    """Returns a file descriptor for the filename if that file exists,
    otherwise `None`.
    """
    try:
        return open(filename, mode)
    # Python 2 except syntax: `e` is bound to the IOError instance.
    except IOError, e:
        if e.errno not in (errno.ENOENT, errno.EISDIR):
            raise
        # ENOENT / EISDIR mean "not an openable file": fall through and
        # implicitly return None.
def object_type_repr(obj):
    """Returns the name of the object's type.  For some recognized
    singletons the name of the object is returned instead.  (For
    example for `None` and `Ellipsis`).
    """
    if obj is None:
        return 'None'
    if obj is Ellipsis:
        return 'Ellipsis'
    cls = obj.__class__
    # Builtin types are shown without their module prefix.
    if cls.__module__ == '__builtin__':
        type_name = cls.__name__
    else:
        type_name = '%s.%s' % (cls.__module__, cls.__name__)
    return '%s object' % type_name
def pformat(obj, verbose=False):
    """Prettyprint an object.  Either use the `pretty` library or the
    builtin `pprint`.
    """
    try:
        # Prefer the optional third-party `pretty` library when installed.
        from pretty import pretty
        return pretty(obj, verbose=verbose)
    except ImportError:
        # Fall back to the standard library pretty printer (no verbose mode).
        from pprint import pformat
        return pformat(obj)
def urlize(text, trim_url_limit=None, nofollow=False):
    """Converts any URLs in text into clickable links. Works on http://,
    https:// and www. links. Links can have trailing punctuation (periods,
    commas, close-parens) and leading punctuation (opening parens) and
    it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text will be limited
    to trim_url_limit characters.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.
    """
    # Shortens the displayed link text to `limit` characters, appending
    # '...' when truncated.
    trim_url = lambda x, limit=trim_url_limit: limit is not None \
                         and (x[:limit] + (len(x) >=limit and '...'
                         or '')) or x
    # Escape first, then split into words and whitespace separators.
    words = _word_split_re.split(unicode(escape(text)))
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    for i, word in enumerate(words):
        match = _punctuation_re.match(word)
        if match:
            lead, middle, trail = match.groups()
            # Heuristic for bare domain names: www.* or anything ending in
            # .org/.net/.com that is not already a URL or e-mail address.
            if middle.startswith('www.') or (
                '@' not in middle and
                not middle.startswith('http://') and
                len(middle) > 0 and
                middle[0] in _letters + _digits and (
                    middle.endswith('.org') or
                    middle.endswith('.net') or
                    middle.endswith('.com')
                )):
                middle = '<a href="http://%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            # Explicit http(s) URLs.
            if middle.startswith('http://') or \
               middle.startswith('https://'):
                middle = '<a href="%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            # Plain e-mail addresses become mailto: links.
            if '@' in middle and not middle.startswith('www.') and \
               not ':' in middle and _simple_email_re.match(middle):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    # Re-join words with their original whitespace separators.
    return u''.join(words)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
    """Generate some lorem ipsum for the template.

    :param n: number of paragraphs to generate
    :param html: when true, wrap each paragraph in ``<p>`` and return Markup
    :param min: minimum number of words per paragraph
    :param max: maximum number of words per paragraph (exclusive)
    """
    from jinja2.constants import LOREM_IPSUM_WORDS
    from random import choice, randrange
    words = LOREM_IPSUM_WORDS.split()
    result = []

    for _ in xrange(n):
        next_capitalized = True
        last_comma = last_fullstop = 0
        word = None
        last = None
        p = []

        # each paragraph contains out of 20 to 100 words.
        for idx, _ in enumerate(xrange(randrange(min, max))):
            # never pick the same word twice in a row
            while True:
                word = choice(words)
                if word != last:
                    last = word
                    break
            if next_capitalized:
                word = word.capitalize()
                next_capitalized = False
            # add commas
            if idx - randrange(3, 8) > last_comma:
                last_comma = idx
                last_fullstop += 2
                word += ','
            # add end of sentences
            if idx - randrange(10, 20) > last_fullstop:
                last_comma = last_fullstop = idx
                word += '.'
                next_capitalized = True
            p.append(word)

        # ensure that the paragraph ends with a dot.
        p = u' '.join(p)
        if p.endswith(','):
            p = p[:-1] + '.'
        elif not p.endswith('.'):
            p += '.'
        result.append(p)

    if not html:
        return u'\n\n'.join(result)
    return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
class Markup(unicode):
    r"""Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped.  This implements the `__html__` interface a couple
    of frameworks and web applications use.  :class:`Markup` is a direct
    subclass of `unicode` and provides all the methods of `unicode` just that
    it escapes arguments passed and always returns `Markup`.

    The `escape` function returns markup objects so that double escaping can't
    happen.  If you want to use autoescaping in Jinja just enable the
    autoescaping feature in the environment.

    The constructor of the :class:`Markup` class can be used for three
    different things:  When passed an unicode object it's assumed to be safe,
    when passed an object with an HTML representation (has an `__html__`
    method) that representation is used, otherwise the object passed is
    converted into a unicode string and then assumed to be safe:

    >>> Markup("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    >>> class Foo(object):
    ...  def __html__(self):
    ...   return '<a href="#">foo</a>'
    ...
    >>> Markup(Foo())
    Markup(u'<a href="#">foo</a>')

    If you want object passed being always treated as unsafe you can use the
    :meth:`escape` classmethod to create a :class:`Markup` object:

    >>> Markup.escape("Hello <em>World</em>!")
    Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')

    Operations on a markup string are markup aware which means that all
    arguments are passed through the :func:`escape` function:

    >>> em = Markup("<em>%s</em>")
    >>> em % "foo & bar"
    Markup(u'<em>foo &amp; bar</em>')
    >>> strong = Markup("<strong>%(text)s</strong>")
    >>> strong % {'text': '<blink>hacker here</blink>'}
    Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup(u'<em>Hello</em> &lt;foo&gt;')
    """
    __slots__ = ()

    def __new__(cls, base=u'', encoding=None, errors='strict'):
        # Objects exposing __html__ supply their own already-safe markup.
        if hasattr(base, '__html__'):
            base = base.__html__()
        if encoding is None:
            return unicode.__new__(cls, base)
        return unicode.__new__(cls, base, encoding, errors)

    def __html__(self):
        return self

    def __add__(self, other):
        if hasattr(other, '__html__') or isinstance(other, basestring):
            return self.__class__(unicode(self) + unicode(escape(other)))
        return NotImplemented

    def __radd__(self, other):
        if hasattr(other, '__html__') or isinstance(other, basestring):
            return self.__class__(unicode(escape(other)) + unicode(self))
        return NotImplemented

    def __mul__(self, num):
        if isinstance(num, (int, long)):
            return self.__class__(unicode.__mul__(self, num))
        return NotImplemented
    __rmul__ = __mul__

    def __mod__(self, arg):
        # Wrap interpolation arguments so each value is escaped on access.
        if isinstance(arg, tuple):
            arg = tuple(imap(_MarkupEscapeHelper, arg))
        else:
            arg = _MarkupEscapeHelper(arg)
        return self.__class__(unicode.__mod__(self, arg))

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            unicode.__repr__(self)
        )

    def join(self, seq):
        return self.__class__(unicode.join(self, imap(escape, seq)))
    join.__doc__ = unicode.join.__doc__

    def split(self, *args, **kwargs):
        return map(self.__class__, unicode.split(self, *args, **kwargs))
    split.__doc__ = unicode.split.__doc__

    def rsplit(self, *args, **kwargs):
        return map(self.__class__, unicode.rsplit(self, *args, **kwargs))
    rsplit.__doc__ = unicode.rsplit.__doc__

    def splitlines(self, *args, **kwargs):
        return map(self.__class__, unicode.splitlines(self, *args, **kwargs))
    splitlines.__doc__ = unicode.splitlines.__doc__

    def unescape(self):
        r"""Unescape markup again into an unicode string.  This also resolves
        known HTML4 and XHTML entities:

        >>> Markup("Main &raquo; <em>About</em>").unescape()
        u'Main \xbb <em>About</em>'
        """
        from jinja2.constants import HTML_ENTITIES
        def handle_match(m):
            name = m.group(1)
            if name in HTML_ENTITIES:
                return unichr(HTML_ENTITIES[name])
            try:
                # numeric character references: &#x..; (hex) or &#..; (dec)
                if name[:2] in ('#x', '#X'):
                    return unichr(int(name[2:], 16))
                elif name.startswith('#'):
                    return unichr(int(name[1:]))
            except ValueError:
                pass
            return u''
        return _entity_re.sub(handle_match, unicode(self))

    def striptags(self):
        r"""Unescape markup into an unicode string and strip all tags.  This
        also resolves known HTML4 and XHTML entities.  Whitespace is
        normalized to one:

        >>> Markup("Main &raquo;  <em>About</em>").striptags()
        u'Main \xbb About'
        """
        stripped = u' '.join(_striptags_re.sub('', self).split())
        return Markup(stripped).unescape()

    @classmethod
    def escape(cls, s):
        """Escape the string.  Works like :func:`escape` with the difference
        that for subclasses of :class:`Markup` this function would return the
        correct subclass.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv

    def make_wrapper(name):
        # Build a Markup-returning wrapper around the unicode method `name`
        # that escapes all positional and keyword arguments.
        orig = getattr(unicode, name)
        def func(self, *args, **kwargs):
            args = _escape_argspec(list(args), enumerate(args))
            _escape_argspec(kwargs, kwargs.iteritems())
            return self.__class__(orig(self, *args, **kwargs))
        func.__name__ = orig.__name__
        func.__doc__ = orig.__doc__
        return func

    for method in '__getitem__', 'capitalize', \
                  'title', 'lower', 'upper', 'replace', 'ljust', \
                  'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
                  'translate', 'expandtabs', 'swapcase', 'zfill':
        locals()[method] = make_wrapper(method)

    # new in python 2.5
    if hasattr(unicode, 'partition'):
        # Bug fix: a stray trailing comma here previously made `partition`
        # a 1-tuple containing the wrapper instead of the method itself.
        partition = make_wrapper('partition')
        rpartition = make_wrapper('rpartition')

    # new in python 2.6
    if hasattr(unicode, 'format'):
        format = make_wrapper('format')

    # not in python 3
    if hasattr(unicode, '__getslice__'):
        __getslice__ = make_wrapper('__getslice__')

    del method, make_wrapper
def _escape_argspec(obj, iterable):
    """Helper for various string-wrapped functions."""
    # `obj` is the container to update in place (list or dict); `iterable`
    # yields (key, value) pairs over it.  Only string-like values and
    # objects exposing __html__ are escaped; everything else is untouched.
    for key, value in iterable:
        if hasattr(value, '__html__') or isinstance(value, basestring):
            obj[key] = escape(value)
    return obj
class _MarkupEscapeHelper(object):
    """Helper for Markup.__mod__"""
    # Wraps the right-hand operand of `Markup % args` so that every value
    # pulled out during %-interpolation is escaped on access; numeric
    # conversions pass through unescaped.

    def __init__(self, obj):
        self.obj = obj

    __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x])
    __str__ = lambda s: str(escape(s.obj))
    __unicode__ = lambda s: unicode(escape(s.obj))
    __repr__ = lambda s: str(escape(repr(s.obj)))
    __int__ = lambda s: int(s.obj)
    __float__ = lambda s: float(s.obj)
class LRUCache(object):
    """A simple LRU Cache implementation.

    Most-recently-used keys sit at the right end of ``_queue``; the
    least-recently-used key is popped from the left end on eviction.
    Writes take ``_wlock``; reads do not (see the comments in
    ``__getitem__`` and ``__setitem__`` about the resulting races).
    """
    # this is fast for small capacities (something below 1000) but doesn't
    # scale. But as long as it's only used as storage for templates this
    # won't do any harm.
    def __init__(self, capacity):
        self.capacity = capacity
        self._mapping = {}
        self._queue = deque()
        self._postinit()
    def _postinit(self):
        # alias all queue methods for faster lookup
        self._popleft = self._queue.popleft
        self._pop = self._queue.pop
        if hasattr(self._queue, 'remove'):
            # deque.remove exists from Python 2.5 on; overrides the
            # class-level fallback below via an instance attribute
            self._remove = self._queue.remove
        self._wlock = allocate_lock()
        self._append = self._queue.append
    def _remove(self, obj):
        """Python 2.4 compatibility."""
        # NOTE: unlike deque.remove, this silently does nothing when obj
        # is absent (no ValueError), which callers tolerate anyway.
        for idx, item in enumerate(self._queue):
            if item == obj:
                del self._queue[idx]
                break
    def __getstate__(self):
        # the lock and bound-method aliases are not picklable, so only
        # the plain data goes into the pickle; _postinit rebuilds them
        return {
            'capacity': self.capacity,
            '_mapping': self._mapping,
            '_queue': self._queue
        }
    def __setstate__(self, d):
        self.__dict__.update(d)
        self._postinit()
    def __getnewargs__(self):
        return (self.capacity,)
    def copy(self):
        """Return an shallow copy of the instance."""
        rv = self.__class__(self.capacity)
        rv._mapping.update(self._mapping)
        rv._queue = deque(self._queue)
        return rv
    def get(self, key, default=None):
        """Return an item from the cache dict or `default`"""
        try:
            return self[key]
        except KeyError:
            return default
    def setdefault(self, key, default=None):
        """Set `default` if the key is not in the cache otherwise
        leave unchanged. Return the value of this key.
        """
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default
    def clear(self):
        """Clear the cache."""
        self._wlock.acquire()
        try:
            self._mapping.clear()
            self._queue.clear()
        finally:
            self._wlock.release()
    def __contains__(self, key):
        """Check if a key exists in this cache."""
        return key in self._mapping
    def __len__(self):
        """Return the current size of the cache."""
        return len(self._mapping)
    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self._mapping
        )
    def __getitem__(self, key):
        """Get an item from the cache. Moves the item up so that it has the
        highest priority then.
        Raise an `KeyError` if it does not exist.
        """
        rv = self._mapping[key]
        if self._queue[-1] != key:
            try:
                self._remove(key)
            except ValueError:
                # if something removed the key from the container
                # when we read, ignore the ValueError that we would
                # get otherwise.
                pass
            self._append(key)
        return rv
    def __setitem__(self, key, value):
        """Sets the value for an item. Moves the item up so that it
        has the highest priority then.
        """
        self._wlock.acquire()
        try:
            if key in self._mapping:
                try:
                    self._remove(key)
                except ValueError:
                    # __getitem__ is not locked, it might happen
                    pass
            elif len(self._mapping) == self.capacity:
                # cache full: evict the least recently used key
                del self._mapping[self._popleft()]
            self._append(key)
            self._mapping[key] = value
        finally:
            self._wlock.release()
    def __delitem__(self, key):
        """Remove an item from the cache dict.
        Raise an `KeyError` if it does not exist.
        """
        self._wlock.acquire()
        try:
            del self._mapping[key]
            try:
                self._remove(key)
            except ValueError:
                # __getitem__ is not locked, it might happen
                pass
        finally:
            self._wlock.release()
    def items(self):
        """Return a list of items."""
        result = [(key, self._mapping[key]) for key in list(self._queue)]
        result.reverse()
        return result
    def iteritems(self):
        """Iterate over all items."""
        return iter(self.items())
    def values(self):
        """Return a list of all values."""
        return [x[1] for x in self.items()]
    def itervalue(self):
        """Iterate over all values."""
        return iter(self.values())
    def keys(self):
        """Return a list of all keys ordered by most recent usage."""
        return list(self)
    def iterkeys(self):
        """Iterate over all keys in the cache dict, ordered by
        the most recent usage.
        """
        return reversed(tuple(self._queue))
    __iter__ = iterkeys
    def __reversed__(self):
        """Iterate over the values in the cache dict, oldest items
        coming first.
        """
        return iter(tuple(self._queue))
    __copy__ = copy
# register the LRU cache as mutable mapping if possible
try:
    # collections.abc has been the canonical home since Python 3.3 and the
    # plain collections alias was removed in Python 3.10; fall back for
    # Python 2, where only collections.MutableMapping exists.
    try:
        from collections.abc import MutableMapping
    except ImportError:
        from collections import MutableMapping
    MutableMapping.register(LRUCache)
except ImportError:
    pass
class Cycler(object):
    """Cycle endlessly through a fixed sequence of items, one per call."""

    def __init__(self, *items):
        if not items:
            raise RuntimeError('at least one item has to be provided')
        self.items = items
        self.reset()

    def reset(self):
        """Rewind the cycle to the first item."""
        self.pos = 0

    @property
    def current(self):
        """The item the cycle currently points at."""
        return self.items[self.pos]

    def next(self):
        """Return the current item and advance the position by one,
        wrapping around at the end."""
        item = self.items[self.pos]
        self.pos = (self.pos + 1) % len(self.items)
        return item
class Joiner(object):
    """Emit the empty string on the first call and the separator on
    every call after that -- handy for joining inside template loops."""

    def __init__(self, sep=u', '):
        self.sep = sep
        self.used = False

    def __call__(self):
        if self.used:
            return self.sep
        self.used = True
        return u''
# we have to import it down here as the speedups module imports the
# markup type which is defined above.
try:
    from jinja2._speedups import escape, soft_unicode
except ImportError:
    def escape(s):
        """Convert the characters &, <, >, ' and " in string s to HTML-safe
        sequences. Use this if you need to display text that might contain
        such characters in HTML. Marks return value as markup string.
        """
        if hasattr(s, '__html__'):
            return s.__html__()
        # The entity substitutions here had been mangled by an HTML-decoding
        # pass into no-ops (e.g. ``.replace('&', '&')``); restored below.
        # '&' must be replaced first so later entities are not re-escaped.
        return Markup(unicode(s)
            .replace('&', '&amp;')
            .replace('>', '&gt;')
            .replace('<', '&lt;')
            .replace("'", '&#39;')
            .replace('"', '&#34;')
        )
    def soft_unicode(s):
        """Make a string unicode if it isn't already. That way a markup
        string is not converted back to unicode.
        """
        if not isinstance(s, unicode):
            s = unicode(s)
        return s
# partials
try:
    from functools import partial
except ImportError:
    class partial(object):
        """Minimal stand-in for functools.partial on Python < 2.5."""

        def __init__(self, _func, *args, **kwargs):
            self._func = _func
            self._args = args
            self._kwargs = kwargs

        def __call__(self, *args, **kwargs):
            # Call-time keyword arguments must override the stored ones,
            # matching functools.partial semantics; the previous code
            # merged them the other way around.
            merged = dict(self._kwargs)
            merged.update(kwargs)
            return self._func(*(self._args + args), **merged)
|
Antsypc/shadowsocks | refs/heads/master | shadowsocks/encrypt.py | 990 | #!/usr/bin/env python
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
# Registry of all supported cipher names; each backend module contributes
# entries mapping method name -> (key_len, iv_len, cipher factory).
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
    """Return *length* random bytes from the OS CSPRNG (used for IVs)."""
    return os.urandom(length)
# Memo for EVP_BytesToKey(): '(password)-(key_len)-(iv_len)' -> (key, iv).
cached_keys = {}
def try_cipher(key, method=None):
    # Constructing an Encryptor validates the method name (it exits on an
    # unsupported method), so this serves as an early configuration check.
    Encryptor(key, method)
def EVP_BytesToKey(password, key_len, iv_len):
    """Derive a (key, iv) pair from *password*.

    Equivalent to OpenSSL's EVP_BytesToKey() with an iteration count of 1,
    so the result matches the nodejs implementation. Derived pairs are
    memoised in the module-level ``cached_keys`` dict.
    """
    cache_id = '%s-%d-%d' % (password, key_len, iv_len)
    cached = cached_keys.get(cache_id, None)
    if cached:
        return cached
    chunks = []
    round_no = 0
    # keep hashing (previous digest + password) until enough material
    while len(b''.join(chunks)) < (key_len + iv_len):
        digest = hashlib.md5()
        seed = password
        if round_no > 0:
            seed = chunks[round_no - 1] + password
        digest.update(seed)
        chunks.append(digest.digest())
        round_no += 1
    material = b''.join(chunks)
    key = material[:key_len]
    iv = material[key_len:key_len + iv_len]
    cached_keys[cache_id] = (key, iv)
    return key, iv
class Encryptor(object):
    """Stateful stream encryptor/decryptor for one connection.

    The cipher IV is generated locally and prepended to the first
    encrypted buffer; the decipher IV is consumed from the head of the
    first incoming buffer.
    """
    def __init__(self, key, method):
        self.key = key
        self.method = method
        self.iv = None
        self.iv_sent = False
        self.cipher_iv = b''
        self.decipher = None
        method = method.lower()
        self._method_info = self.get_method_info(method)
        if self._method_info:
            # op=1: create the encrypting cipher with a fresh random IV
            self.cipher = self.get_cipher(key, method, 1,
                                          random_string(self._method_info[1]))
        else:
            # hard exit: an unsupported method is a fatal config error
            logging.error('method %s not supported' % method)
            sys.exit(1)
    def get_method_info(self, method):
        # Returns (key_len, iv_len, cipher factory) or None if unknown.
        method = method.lower()
        m = method_supported.get(method)
        return m
    def iv_len(self):
        # Length of the IV actually used by the encrypting cipher.
        return len(self.cipher_iv)
    def get_cipher(self, password, method, op, iv):
        # op == 1 builds an encrypting cipher, op == 0 a decrypting one.
        password = common.to_bytes(password)
        m = self._method_info
        if m[0] > 0:
            key, iv_ = EVP_BytesToKey(password, m[0], m[1])
        else:
            # key_length == 0 indicates we should use the key directly
            key, iv = password, b''
        iv = iv[:m[1]]
        if op == 1:
            # this iv is for cipher not decipher
            self.cipher_iv = iv[:m[1]]
        return m[2](method, key, iv, op)
    def encrypt(self, buf):
        if len(buf) == 0:
            return buf
        if self.iv_sent:
            return self.cipher.update(buf)
        else:
            # first chunk: prepend the IV so the peer can decrypt
            self.iv_sent = True
            return self.cipher_iv + self.cipher.update(buf)
    def decrypt(self, buf):
        if len(buf) == 0:
            return buf
        if self.decipher is None:
            # First chunk: strip the peer's IV from the head of the buffer.
            # NOTE(review): assumes the first buf contains the full IV --
            # confirm callers never deliver a shorter initial read.
            decipher_iv_len = self._method_info[1]
            decipher_iv = buf[:decipher_iv_len]
            self.decipher = self.get_cipher(self.key, self.method, 0,
                                            iv=decipher_iv)
            buf = buf[decipher_iv_len:]
            if len(buf) == 0:
                return buf
        return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
    """One-shot encrypt (op=1) or decrypt (op=0) of *data*.

    When encrypting, a fresh IV is generated and prepended to the
    ciphertext; when decrypting, the IV is consumed from the head of
    *data*.
    """
    method = method.lower()
    key_len, iv_len, new_cipher = method_supported[method]
    if key_len > 0:
        key, _ = EVP_BytesToKey(password, key_len, iv_len)
    else:
        # zero key length: use the password itself as the key
        key = password
    pieces = []
    if op:
        iv = random_string(iv_len)
        pieces.append(iv)
    else:
        iv = data[:iv_len]
        data = data[iv_len:]
    cipher = new_cipher(method, key, iv, op)
    pieces.append(cipher.update(data))
    return b''.join(pieces)
# Ciphers exercised by the self-tests below -- one representative per
# backend (openssl, rc4_md5, sodium, table).
CIPHERS_TO_TEST = [
    'aes-128-cfb',
    'aes-256-cfb',
    'rc4-md5',
    'salsa20',
    'chacha20',
    'table',
]
def test_encryptor():
    """Round-trip random data through an Encryptor pair for each cipher."""
    from os import urandom
    plain = urandom(10240)
    for method in CIPHERS_TO_TEST:
        # logging.warn is a deprecated alias (removed in Python 3.13);
        # use logging.warning, which exists on all supported versions.
        logging.warning(method)
        encryptor = Encryptor(b'key', method)
        decryptor = Encryptor(b'key', method)
        cipher = encryptor.encrypt(plain)
        plain2 = decryptor.decrypt(cipher)
        assert plain == plain2
def test_encrypt_all():
    """Round-trip random data through encrypt_all for each cipher."""
    from os import urandom
    plain = urandom(10240)
    for method in CIPHERS_TO_TEST:
        # logging.warn is a deprecated alias (removed in Python 3.13).
        logging.warning(method)
        cipher = encrypt_all(b'key', method, 1, plain)
        plain2 = encrypt_all(b'key', method, 0, cipher)
        assert plain == plain2
if __name__ == '__main__':
    # exercise both the one-shot and the streaming code paths
    test_encrypt_all()
    test_encryptor()
|
tanmaykm/edx-platform | refs/heads/master | lms/djangoapps/certificates/urls.py | 30 | """
URLs for the certificates app.
"""
from django.conf.urls import patterns, url
from django.conf import settings
from certificates import views
urlpatterns = patterns(
    '',
    # Certificates HTML view end point to render web certs by user and course
    url(
        r'^user/(?P<user_id>[^/]*)/course/{course_id}'.format(course_id=settings.COURSE_ID_PATTERN),
        views.render_html_view,
        name='html_view'
    ),
    # Certificates HTML view end point to render web certs by certificate_uuid
    url(
        r'^(?P<certificate_uuid>[0-9a-f]{32})$',
        views.render_cert_by_uuid,
        name='render_cert_by_uuid'
    ),
    # End-points used by student support
    # The views in the lms/djangoapps/support use these end-points
    # to retrieve certificate information and regenerate certificates.
    # NOTE(review): the three patterns below are unanchored (no ^ or $),
    # so e.g. 'search' matches anywhere in the remaining path -- confirm
    # this is intentional before tightening them.
    url(r'search', views.search_certificates, name="search"),
    url(r'regenerate', views.regenerate_certificate_for_user, name="regenerate_certificate_for_user"),
    url(r'generate', views.generate_certificate_for_user, name="generate_certificate_for_user"),
)
|
vaidap/zulip | refs/heads/master | analytics/models.py | 9 | from django.db import models
from zerver.models import Realm, UserProfile, Stream, Recipient
from zerver.lib.str_utils import ModelReprMixin
from zerver.lib.timestamp import floor_to_day
import datetime
from typing import Optional, Tuple, Union, Dict, Any, Text
class FillState(ModelReprMixin, models.Model):
    """Tracks, per analytics property, through what end_time the count
    tables have been filled and whether that fill completed."""
    property = models.CharField(max_length=40, unique=True) # type: Text
    end_time = models.DateTimeField() # type: datetime.datetime
    # Valid states are {DONE, STARTED}
    DONE = 1
    STARTED = 2
    state = models.PositiveSmallIntegerField() # type: int
    last_modified = models.DateTimeField(auto_now=True) # type: datetime.datetime
    def __unicode__(self):
        # type: () -> Text
        return u"<FillState: %s %s %s>" % (self.property, self.end_time, self.state)
# The earliest/starting end_time in FillState
# We assume there is at least one realm
def installation_epoch():
    # type: () -> datetime.datetime
    """Return the creation time of the oldest realm, floored to a day."""
    oldest_creation = Realm.objects.aggregate(models.Min('date_created'))['date_created__min']
    return floor_to_day(oldest_creation)
def last_successful_fill(property):
    # type: (str) -> Optional[datetime.datetime]
    """Return the end_time through which *property* is known to be
    filled, or None if it has never been filled."""
    record = FillState.objects.filter(property=property).first()
    if record is None:
        return None
    if record.state == FillState.DONE:
        return record.end_time
    # STARTED: the hour ending at end_time may be incomplete, so back off.
    return record.end_time - datetime.timedelta(hours=1)
# would only ever make entries here by hand
class Anomaly(ModelReprMixin, models.Model):
    """Free-form note that a count row can point at to mark its data as
    known-anomalous."""
    info = models.CharField(max_length=1000) # type: Text
    def __unicode__(self):
        # type: () -> Text
        return u"<Anomaly: %s... %s>" % (self.info, self.id)
class BaseCount(ModelReprMixin, models.Model):
    """Abstract base for the *Count tables: one (property, subgroup,
    end_time) -> value data point, optionally flagged with an Anomaly."""
    # Note: When inheriting from BaseCount, you may want to rearrange
    # the order of the columns in the migration to make sure they
    # match how you'd like the table to be arranged.
    property = models.CharField(max_length=32) # type: Text
    subgroup = models.CharField(max_length=16, null=True) # type: Optional[Text]
    end_time = models.DateTimeField() # type: datetime.datetime
    value = models.BigIntegerField() # type: int
    anomaly = models.ForeignKey(Anomaly, null=True) # type: Optional[Anomaly]
    class Meta(object):
        abstract = True
class InstallationCount(BaseCount):
    """Server-wide aggregate counts (no realm/user/stream dimension)."""
    class Meta(object):
        unique_together = ("property", "subgroup", "end_time")
    def __unicode__(self):
        # type: () -> Text
        return u"<InstallationCount: %s %s %s>" % (self.property, self.subgroup, self.value)
class RealmCount(BaseCount):
    """Per-realm aggregate counts."""
    realm = models.ForeignKey(Realm)
    class Meta(object):
        unique_together = ("realm", "property", "subgroup", "end_time")
        index_together = ["property", "end_time"]
    def __unicode__(self):
        # type: () -> Text
        return u"<RealmCount: %s %s %s %s>" % (self.realm, self.property, self.subgroup, self.value)
class UserCount(BaseCount):
    """Per-user counts; realm is denormalized for fast roll-ups."""
    user = models.ForeignKey(UserProfile)
    realm = models.ForeignKey(Realm)
    class Meta(object):
        unique_together = ("user", "property", "subgroup", "end_time")
        # This index dramatically improves the performance of
        # aggregating from users to realms
        index_together = ["property", "realm", "end_time"]
    def __unicode__(self):
        # type: () -> Text
        return u"<UserCount: %s %s %s %s>" % (self.user, self.property, self.subgroup, self.value)
class StreamCount(BaseCount):
    """Per-stream counts; realm is denormalized for fast roll-ups."""
    stream = models.ForeignKey(Stream)
    realm = models.ForeignKey(Realm)
    class Meta(object):
        unique_together = ("stream", "property", "subgroup", "end_time")
        # This index dramatically improves the performance of
        # aggregating from streams to realms
        index_together = ["property", "realm", "end_time"]
    def __unicode__(self):
        # type: () -> Text
        return u"<StreamCount: %s %s %s %s %s>" % (self.stream, self.property, self.subgroup, self.value, self.id)
|
ajs124/esp-idf | refs/heads/master | components/idf_test/integration_test/TestCaseScript/TCPStress/TCPSendRecv.py | 15 | from TCAction import TCActionBase
from NativeLog import NativeLog
import time
import random
import string
# number of send/recv iterations per command round in step 4
TEST_COUNT_ONE_ROUND = 1000
# Stress test: one DUT acts as AP + TCP server, another as STA + clients;
# data is pumped over several parallel TCP connections, optionally duplex.
# NOTE: Python 2 only (uses `exec stmt` and `except X, e` syntax).
class TestCase(TCActionBase.CommonTCActionBase):
    def __init__(self, test_case, test_env, timeout=30, log_path=TCActionBase.LOG_PATH):
        TCActionBase.CommonTCActionBase.__init__(self, test_case, test_env, timeout=timeout, log_path=log_path)
        # load param from excel
        cmd_set = test_case["cmd set"]
        for i in range(1, len(cmd_set)):
            if cmd_set[i][0] != "dummy":
                # NOTE(review): exec of spreadsheet-provided statements --
                # acceptable only because the test data is trusted.
                cmd_string = "self." + cmd_set[i][0]
                exec cmd_string
        self.result_cntx = TCActionBase.ResultCheckContext(self, test_env, self.tc_name)
        pass
    def cleanup(self):
        # step 0 turn on recv print
        checker_stings = ["P SSC1 C +RECVPRINT:1", "P SSC2 C +RECVPRINT:1"]
        test_action_string = ["SSC SSC1 soc -R -o 1", "SSC SSC2 soc -R -o 1"]
        fail_string = "Fail, Fail to turn on recv print"
        self.load_and_exe_one_step(checker_stings, test_action_string, fail_string)
        pass
    def execute(self):
        # Orchestrates the whole scenario: configure AP/STA, open the
        # server and conn_num client sockets, then pump data until
        # test_time minutes have elapsed or a check fails.
        TCActionBase.TCActionBase.execute(self)
        self.result_cntx.start()
        try:
            # configurable params
            send_len = self.send_len
            test_time = self.test_time * 60
            duplex = self.duplex
            conn_num = self.conn_num
            send_delay = self.send_delay
            # configurable params
        except StandardError, e:
            NativeLog.add_trace_critical("Error configuration for TCPSendRecv script, error is %s" % e)
            raise StandardError("Error configuration")
        ssid = "".join([random.choice(string.lowercase) for m in range(10)])
        password = "".join([random.choice(string.lowercase) for m in range(10)])
        tcp_port = random.randint(10000, 50000)
        # step 0 set ap
        checker_stings = ["R SSC1 C +SAP:OK"]
        test_action_string = ["SSC SSC1 ap -S -s %s -p %s -t 3" % (ssid, password)]
        fail_string = "Fail, Fail to set ap"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            self.result_cntx.set_result("Fail")
            return
        # step 1 connect to ap and turn off recv print
        checker_stings = ["R SSC2 C +JAP:CONNECTED"]
        test_action_string = ["SSC SSC2 sta -C -s %s -p %s" % (ssid, password)]
        fail_string = "Fail, Fail to connect to server"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string, check_time=200) is False:
            self.result_cntx.set_result("Fail")
            return
        checker_stings = ["P SSC1 C +RECVPRINT:0", "P SSC2 C +RECVPRINT:0"]
        test_action_string = ["SSC SSC1 soc -R -o 0", "SSC SSC2 soc -R -o 0"]
        fail_string = "Fail, Fail to turn off recv print"
        self.load_and_exe_one_step(checker_stings, test_action_string, fail_string)
        # NOTE(review): the same step is executed again immediately below
        # (first without, then with check_time=200) -- confirm intentional.
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string, check_time=200) is False:
            self.result_cntx.set_result("Fail")
            return
        # step 2 create server on AP
        checker_stings = ["R SSC1 A <server_sock>:\+BIND:(\d+),OK"]
        test_action_string = ["SSC SSC1 soc -B -t TCP -p %s" % tcp_port]
        fail_string = "Fail, Fail to create server"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            self.result_cntx.set_result("Fail")
            return
        checker_stings = ["R SSC1 A <server_sock>:\+LISTEN:(\d+),OK"]
        test_action_string = ["SSC SSC1 soc -L -s <server_sock>"]
        fail_string = "Fail, Fail to create server"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            self.result_cntx.set_result("Fail")
            return
        # step 3 create conn_num tcp connections
        for i in range(conn_num):
            checker_stings = ["R SSC2 A <client_sock%s>:\+BIND:(\d+),OK" % i]
            test_action_string = ["SSC SSC2 soc -B -t TCP"]
            fail_string = "Fail, Fail to bind"
            if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
                self.result_cntx.set_result("Fail")
                return
            checker_stings = ["P SSC1 A <accept_sock%s>:\+ACCEPT:(\d+),\d+" % i,
                              "P SSC2 RE \+CONNECT:\d+,OK"]
            test_action_string = ["SSC SSC2 soc -C -s <client_sock%s> -i <target_ap_ip> -p %s" % (i, tcp_port)]
            fail_string = "Fail, Fail to connect"
            if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
                self.result_cntx.set_result("Fail")
                return
        start_time = time.time()
        # step 4, do send/recv
        while time.time()-start_time < test_time:
            checker_stings = ["P SSC1 NC ERROR NC CLOSE NC ERROR"]
            for i in range(conn_num):
                test_action_string = ["SSC SSC2 soc -S -s <client_sock%d> -l %d -n %d -j %d" %
                                      (i, send_len, TEST_COUNT_ONE_ROUND, send_delay)]
                checker_stings.append("P SSC2 RE \"\+SEND:%%%%s,OK\"%%%%(<client_sock%d>) NC ERROR NC CLOSE" % i)
                if duplex is True:
                    # duplex mode: server side sends back simultaneously
                    checker_stings.append("P SSC1 RE \"\+SEND:%%%%s,OK\"%%%%(<accept_sock%d>)" % i)
                    test_action_string.append("SSC SSC1 soc -S -s <accept_sock%d> -l %d -n %d -j %d" %
                                              (i, send_len, TEST_COUNT_ONE_ROUND, send_delay))
                fail_string = "Fail, Failed on send command"
                if self.load_and_exe_one_step([], test_action_string, fail_string) is False:
                    self.result_cntx.set_result("Fail")
                    # NOTE(review): this break only exits the for loop; the
                    # while loop continues and the final check below may
                    # overwrite the Fail result -- confirm intended.
                    break
            fail_string = "Fail, Failed to send/recv data"
            if self.load_and_exe_one_step(checker_stings, ["DELAY 0.1"], fail_string,
                                          check_freq=1, check_time=300) is False:
                NativeLog.add_prompt_trace("time escape: %s" % (time.time() - start_time))
                self.result_cntx.set_result("Fail")
                return
            NativeLog.add_prompt_trace("time escape: %s" % (time.time() - start_time))
        if (time.time() - start_time) >= test_time:
            self.result_cntx.set_result("Succeed")
        else:
            self.result_cntx.set_result("Failed")
        # finally, execute done
    def result_check(self, port_name, data):
        # forward raw port data into the shared result-check context
        TCActionBase.CommonTCActionBase.result_check(self, port_name, data)
        self.result_cntx.append_data(port_name, data)
def main():
    # placeholder: this script is driven by the test framework, not run directly
    pass
if __name__ == '__main__':
    main()
|
boyuegame/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/pickletools.py | 74 | '''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
import codecs
import io
import pickle
import re
import sys
__all__ = ['dis', 'genops', 'optimize']
bytes_types = pickle.bytes_types
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
# "A pickle" is a program for a virtual pickle machine (PM, but more accurately
# called an unpickling machine). It's a sequence of opcodes, interpreted by the
# PM, building an arbitrarily complex Python object.
#
# For the most part, the PM is very simple: there are no looping, testing, or
# conditional instructions, no arithmetic and no function calls. Opcodes are
# executed once each, from first to last, until a STOP opcode is reached.
#
# The PM has two data areas, "the stack" and "the memo".
#
# Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
# integer object on the stack, whose value is gotten from a decimal string
# literal immediately following the INT opcode in the pickle bytestream. Other
# opcodes take Python objects off the stack. The result of unpickling is
# whatever object is left on the stack when the final STOP opcode is executed.
#
# The memo is simply an array of objects, or it can be implemented as a dict
# mapping little integers to objects. The memo serves as the PM's "long term
# memory", and the little integers indexing the memo are akin to variable
# names. Some opcodes pop a stack object into the memo at a given index,
# and others push a memo object at a given index onto the stack again.
#
# At heart, that's all the PM has. Subtleties arise for these reasons:
#
# + Object identity. Objects can be arbitrarily complex, and subobjects
# may be shared (for example, the list [a, a] refers to the same object a
# twice). It can be vital that unpickling recreate an isomorphic object
# graph, faithfully reproducing sharing.
#
# + Recursive objects. For example, after "L = []; L.append(L)", L is a
# list, and L[0] is the same list. This is related to the object identity
# point, and some sequences of pickle opcodes are subtle in order to
# get the right result in all cases.
#
# + Things pickle doesn't know everything about. Examples of things pickle
# does know everything about are Python's builtin scalar and container
# types, like ints and tuples. They generally have opcodes dedicated to
# them. For things like module references and instances of user-defined
# classes, pickle's knowledge is limited. Historically, many enhancements
# have been made to the pickle protocol in order to do a better (faster,
# and/or more compact) job on those.
#
# + Backward compatibility and micro-optimization. As explained below,
# pickle opcodes never go away, not even when better ways to do a thing
# get invented. The repertoire of the PM just keeps growing over time.
# For example, protocol 0 had two opcodes for building Python integers (INT
# and LONG), protocol 1 added three more for more-efficient pickling of short
# integers, and protocol 2 added two more for more-efficient pickling of
# long integers (before protocol 2, the only ways to pickle a Python long
# took time quadratic in the number of digits, for both pickling and
# unpickling). "Opcode bloat" isn't so much a subtlety as a source of
# wearying complication.
#
#
# Pickle protocols:
#
# For compatibility, the meaning of a pickle opcode never changes. Instead new
# pickle opcodes get added, and each version's unpickler can handle all the
# pickle opcodes in all protocol versions to date. So old pickles continue to
# be readable forever. The pickler can generally be told to restrict itself to
# the subset of opcodes available under previous protocol versions too, so that
# users can create pickles under the current version readable by older
# versions. However, a pickle does not contain its version number embedded
# within it. If an older unpickler tries to read a pickle using a later
# protocol, the result is most likely an exception due to seeing an unknown (in
# the older unpickler) opcode.
#
# The original pickle used what's now called "protocol 0", and what was called
# "text mode" before Python 2.3. The entire pickle bytestream is made up of
# printable 7-bit ASCII characters, plus the newline character, in protocol 0.
# That's why it was called text mode. Protocol 0 is small and elegant, but
# sometimes painfully inefficient.
#
# The second major set of additions is now called "protocol 1", and was called
# "binary mode" before Python 2.3. This added many opcodes with arguments
# consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
# bytes. Binary mode pickles can be substantially smaller than equivalent
# text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
# int as 4 bytes following the opcode, which is cheaper to unpickle than the
# (perhaps) 11-character decimal string attached to INT. Protocol 1 also added
# a number of opcodes that operate on many stack elements at once (like APPENDS
# and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
#
# The third major set of additions came in Python 2.3, and is called "protocol
# 2". This added:
#
# - A better way to pickle instances of new-style classes (NEWOBJ).
#
# - A way for a pickle to identify its protocol (PROTO).
#
# - Time- and space- efficient pickling of long ints (LONG{1,4}).
#
# - Shortcuts for small tuples (TUPLE{1,2,3}}.
#
# - Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
#
# - The "extension registry", a vector of popular objects that can be pushed
# efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
# the registry contents are predefined (there's nothing akin to the memo's
# PUT).
#
# Another independent change with Python 2.3 is the abandonment of any
# pretense that it might be safe to load pickles received from untrusted
# parties -- no sufficient security analysis has been done to guarantee
# this and there isn't a use case that warrants the expense of such an
# analysis.
#
# To this end, all tests for __safe_for_unpickling__ or for
# copyreg.safe_constructors are removed from the unpickling code.
# References to these variables in the descriptions below are to be seen
# as describing unpickling in Python 2.2 and before.
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
# These sentinels are negative so they can never collide with a real
# fixed byte count.
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
TAKEN_FROM_ARGUMENT4U = -4 # num bytes is 4-byte unsigned little-endian int
TAKEN_FROM_ARGUMENT8U = -5 # num bytes is 8-byte unsigned little-endian int
class ArgumentDescriptor(object):
    """Describes one kind of argument embedded in the opcode stream
    immediately after its opcode (as opposed to arguments taken off the
    PM stack)."""
    __slots__ = (
        # name of descriptor record, also a module global name; a string
        'name',
        # length of argument, in bytes; an int; UP_TO_NEWLINE and
        # TAKEN_FROM_ARGUMENT{1,4,8} are negative values for variable-length
        # cases
        'n',
        # a function taking a file-like object, reading this kind of argument
        # from the object at the current position, advancing the current
        # position by n bytes, and returning the value of the argument
        'reader',
        # human-readable docs for this arg descriptor; a string
        'doc',
    )
    def __init__(self, name, n, reader, doc):
        assert isinstance(name, str)
        self.name = name
        # n must be a real (non-negative) byte count or one of the
        # negative variable-length sentinels defined above
        assert isinstance(n, int) and (n >= 0 or
                                       n in (UP_TO_NEWLINE,
                                             TAKEN_FROM_ARGUMENT1,
                                             TAKEN_FROM_ARGUMENT4,
                                             TAKEN_FROM_ARGUMENT4U,
                                             TAKEN_FROM_ARGUMENT8U))
        self.n = n
        self.reader = reader
        assert isinstance(doc, str)
        self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
    r"""Read a one-byte unsigned integer from the stream *f*.

    >>> import io
    >>> read_uint1(io.BytesIO(b'\xff'))
    255
    """
    data = f.read(1)
    if not data:
        raise ValueError("not enough data in stream to read uint1")
    return data[0]
# Descriptor instance wired to its reader; by convention the module-global
# name matches the descriptor's 'name' field.
uint1 = ArgumentDescriptor(
    name='uint1',
    n=1,
    reader=read_uint1,
    doc="One-byte unsigned integer.")
def read_uint2(f):
    r"""Read a two-byte little-endian unsigned integer from *f*.

    >>> import io
    >>> read_uint2(io.BytesIO(b'\xff\x00'))
    255
    >>> read_uint2(io.BytesIO(b'\xff\xff'))
    65535
    """
    data = f.read(2)
    if len(data) != 2:
        raise ValueError("not enough data in stream to read uint2")
    return _unpack("<H", data)[0]
# Descriptor for a fixed two-byte argument, read via read_uint2.
uint2 = ArgumentDescriptor(
    name='uint2',
    n=2,
    reader=read_uint2,
    doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
    r"""Read a four-byte little-endian signed integer from *f*.

    >>> import io
    >>> read_int4(io.BytesIO(b'\xff\x00\x00\x00'))
    255
    >>> read_int4(io.BytesIO(b'\x00\x00\x00\x80')) == -(2**31)
    True
    """
    data = f.read(4)
    if len(data) != 4:
        raise ValueError("not enough data in stream to read int4")
    return _unpack("<i", data)[0]
# Descriptor for a fixed four-byte signed argument, read via read_int4.
int4 = ArgumentDescriptor(
    name='int4',
    n=4,
    reader=read_int4,
    doc="Four-byte signed integer, little-endian, 2's complement.")
def read_uint4(f):
    r"""Read a four-byte little-endian unsigned integer from *f*.

    >>> import io
    >>> read_uint4(io.BytesIO(b'\xff\x00\x00\x00'))
    255
    >>> read_uint4(io.BytesIO(b'\x00\x00\x00\x80')) == 2**31
    True
    """
    data = f.read(4)
    if len(data) != 4:
        raise ValueError("not enough data in stream to read uint4")
    return _unpack("<I", data)[0]
# Descriptor for a fixed four-byte unsigned argument, read via read_uint4.
uint4 = ArgumentDescriptor(
    name='uint4',
    n=4,
    reader=read_uint4,
    doc="Four-byte unsigned integer, little-endian.")
def read_uint8(f):
    r"""Read an eight-byte little-endian unsigned integer from *f*.

    >>> import io
    >>> read_uint8(io.BytesIO(b'\xff\x00\x00\x00\x00\x00\x00\x00'))
    255
    >>> read_uint8(io.BytesIO(b'\xff' * 8)) == 2**64-1
    True
    """
    data = f.read(8)
    if len(data) != 8:
        raise ValueError("not enough data in stream to read uint8")
    return _unpack("<Q", data)[0]
# Descriptor for a fixed eight-byte unsigned argument, read via read_uint8.
uint8 = ArgumentDescriptor(
    name='uint8',
    n=8,
    reader=read_uint8,
    doc="Eight-byte unsigned integer, little-endian.")
def read_stringnl(f, decode=True, stripquotes=True):
    r"""Read a newline-terminated, repr-style string from *f*.

    >>> import io
    >>> read_stringnl(io.BytesIO(b"'abcd'\nefg\n"))
    'abcd'

    >>> read_stringnl(io.BytesIO(b"\n"))
    Traceback (most recent call last):
    ...
    ValueError: no string quotes around b''

    >>> read_stringnl(io.BytesIO(b"\n"), stripquotes=False)
    ''

    >>> read_stringnl(io.BytesIO(b"''\n"))
    ''

    >>> read_stringnl(io.BytesIO(b'"abcd"'))
    Traceback (most recent call last):
    ...
    ValueError: no newline found when trying to read stringnl

    Embedded escapes are undone in the result.
    >>> read_stringnl(io.BytesIO(br"'a\n\\b\x00c\td'" + b"\n'e'"))
    'a\n\\b\x00c\td'
    """
    data = f.readline()
    if not data.endswith(b'\n'):
        raise ValueError("no newline found when trying to read stringnl")
    data = data[:-1]  # lose the newline
    if stripquotes:
        for q in (b'"', b"'"):
            if data.startswith(q):
                if not data.endswith(q):
                    # fixed typo: the message used to say "strinq quote"
                    raise ValueError("string quote %r not found at both "
                                     "ends of %r" % (q, data))
                data = data[1:-1]
                break
        else:
            raise ValueError("no string quotes around %r" % data)
    if decode:
        data = codecs.escape_decode(data)[0].decode("ascii")
    return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
    # Delegates with stripquotes=False: the argument carries no bracketing
    # quotes (decode keeps its default of True).
    return read_stringnl(f, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
    name='stringnl_noescape',
    n=UP_TO_NEWLINE,
    reader=read_stringnl_noescape,
    doc="""A newline-terminated string.
    This is a str-style string, without embedded escapes,
    or bracketing quotes. It should consist solely of
    printable ASCII characters.
    """)
def read_stringnl_noescape_pair(f):
    r"""Read two newline-terminated strings and join them with one blank.

    >>> import io
    >>> read_stringnl_noescape_pair(io.BytesIO(b"Queue\nEmpty\njunk"))
    'Queue Empty'
    """
    return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))

# Argument descriptor used by opcodes (e.g. GLOBAL) that carry a
# module-name/attribute-name pair as two consecutive lines.
stringnl_noescape_pair = ArgumentDescriptor(
    name='stringnl_noescape_pair',
    n=UP_TO_NEWLINE,
    reader=read_stringnl_noescape_pair,
    doc="""A pair of newline-terminated strings.

    These are str-style strings, without embedded
    escapes, or bracketing quotes.  They should
    consist solely of printable ASCII characters.
    The pair is returned as a single string, with
    a single blank separating the two strings.
    """)
def read_string1(f):
    r"""Read a 1-byte length prefix, then that many bytes, decoded latin-1.

    >>> import io
    >>> read_string1(io.BytesIO(b"\x00"))
    ''
    >>> read_string1(io.BytesIO(b"\x03abcdef"))
    'abc'
    """
    count = read_uint1(f)
    assert count >= 0
    payload = f.read(count)
    if len(payload) != count:
        raise ValueError("expected %d bytes in a string1, but only %d remain" %
                         (count, len(payload)))
    return payload.decode("latin-1")
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_string4(f):
    r"""Read a 4-byte signed length prefix, then that many bytes (latin-1).

    >>> import io
    >>> read_string4(io.BytesIO(b"\x00\x00\x00\x00abc"))
    ''
    >>> read_string4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
    'abc'
    >>> read_string4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
    Traceback (most recent call last):
    ...
    ValueError: expected 50331648 bytes in a string4, but only 6 remain
    """
    count = read_int4(f)
    if count < 0:
        raise ValueError("string4 byte count < 0: %d" % count)
    payload = f.read(count)
    if len(payload) != count:
        raise ValueError("expected %d bytes in a string4, but only %d remain" %
                         (count, len(payload)))
    return payload.decode("latin-1")
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
# NOTE: a duplicate definition of read_bytes1 and the bytes1
# ArgumentDescriptor used to appear here.  It was byte-for-byte identical
# (reader-wise) to the definitions that immediately follow, which shadowed
# it at import time, so the redundant copy has been removed.  The module's
# public names are unchanged: read_bytes1 and bytes1 are bound by the
# surviving definitions below.
def read_bytes1(f):
    r"""Read a 1-byte length prefix, then that many raw bytes.

    >>> import io
    >>> read_bytes1(io.BytesIO(b"\x00"))
    b''
    >>> read_bytes1(io.BytesIO(b"\x03abcdef"))
    b'abc'
    """
    count = read_uint1(f)
    assert count >= 0
    payload = f.read(count)
    if len(payload) != count:
        raise ValueError("expected %d bytes in a bytes1, but only %d remain" %
                         (count, len(payload)))
    return payload
bytes1 = ArgumentDescriptor(
name="bytes1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_bytes1,
doc="""A counted bytes string.
The first argument is a 1-byte unsigned int giving the number
of bytes, and the second argument is that many bytes.
""")
def read_bytes4(f):
    r"""Read a 4-byte unsigned length prefix, then that many raw bytes.

    >>> import io
    >>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x00abc"))
    b''
    >>> read_bytes4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
    b'abc'
    >>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
    Traceback (most recent call last):
    ...
    ValueError: expected 50331648 bytes in a bytes4, but only 6 remain
    """
    count = read_uint4(f)
    assert count >= 0
    # Guard against counts too large to address on this platform.
    if count > sys.maxsize:
        raise ValueError("bytes4 byte count > sys.maxsize: %d" % count)
    payload = f.read(count)
    if len(payload) != count:
        raise ValueError("expected %d bytes in a bytes4, but only %d remain" %
                         (count, len(payload)))
    return payload
bytes4 = ArgumentDescriptor(
name="bytes4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_bytes4,
doc="""A counted bytes string.
The first argument is a 4-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_bytes8(f):
    r"""Read an 8-byte unsigned length prefix, then that many raw bytes.

    >>> import io, struct, sys
    >>> read_bytes8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
    b''
    >>> read_bytes8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
    b'abc'
    >>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
    >>> read_bytes8(io.BytesIO(bigsize8 + b"abcdef")) #doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: expected ... bytes in a bytes8, but only 6 remain
    """
    count = read_uint8(f)
    assert count >= 0
    # Guard against counts too large to address on this platform.
    if count > sys.maxsize:
        raise ValueError("bytes8 byte count > sys.maxsize: %d" % count)
    payload = f.read(count)
    if len(payload) != count:
        raise ValueError("expected %d bytes in a bytes8, but only %d remain" %
                         (count, len(payload)))
    return payload
bytes8 = ArgumentDescriptor(
name="bytes8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_bytes8,
doc="""A counted bytes string.
The first argument is a 8-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_unicodestringnl(f):
    r"""Read one newline-terminated line and decode it as raw-unicode-escape.

    >>> import io
    >>> read_unicodestringnl(io.BytesIO(b"abc\\uabcd\njunk")) == 'abc\uabcd'
    True
    """
    line = f.readline()
    if not line.endswith(b'\n'):
        raise ValueError("no newline found when trying to read "
                         "unicodestringnl")
    # Drop the terminating newline before decoding.
    return str(line[:-1], 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring1(f):
    r"""Read a 1-byte length prefix, then that many bytes of UTF-8 text.

    >>> import io
    >>> read_unicodestring1(io.BytesIO(b'\x07abcd\xea\xaf\x8djunk')) == 'abcd\uabcd'
    True
    >>> read_unicodestring1(io.BytesIO(b'\x07abcd\xea\xaf'))
    Traceback (most recent call last):
    ...
    ValueError: expected 7 bytes in a unicodestring1, but only 6 remain
    """
    size = read_uint1(f)
    assert size >= 0
    payload = f.read(size)
    if len(payload) != size:
        raise ValueError("expected %d bytes in a unicodestring1, but only %d "
                         "remain" % (size, len(payload)))
    # 'surrogatepass' lets lone surrogates in the pickle round-trip.
    return str(payload, 'utf-8', 'surrogatepass')
unicodestring1 = ArgumentDescriptor(
name="unicodestring1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_unicodestring1,
doc="""A counted Unicode string.
The first argument is a 1-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring4(f):
    r"""Read a 4-byte length prefix, then that many bytes of UTF-8 text.

    >>> import io
    >>> payload = b'abcd\xea\xaf\x8d'
    >>> prefix = bytes([len(payload), 0, 0, 0])
    >>> read_unicodestring4(io.BytesIO(prefix + payload + b'junk')) == 'abcd\uabcd'
    True
    >>> read_unicodestring4(io.BytesIO(prefix + payload[:-1]))
    Traceback (most recent call last):
    ...
    ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
    """
    size = read_uint4(f)
    assert size >= 0
    # Guard against counts too large to address on this platform.
    if size > sys.maxsize:
        raise ValueError("unicodestring4 byte count > sys.maxsize: %d" % size)
    payload = f.read(size)
    if len(payload) != size:
        raise ValueError("expected %d bytes in a unicodestring4, but only %d "
                         "remain" % (size, len(payload)))
    # 'surrogatepass' lets lone surrogates in the pickle round-trip.
    return str(payload, 'utf-8', 'surrogatepass')
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_unicodestring4,
doc="""A counted Unicode string.
The first argument is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring8(f):
    r"""Read an 8-byte length prefix, then that many bytes of UTF-8 text.

    >>> import io
    >>> payload = b'abcd\xea\xaf\x8d'
    >>> prefix = bytes([len(payload)]) + bytes(7)
    >>> read_unicodestring8(io.BytesIO(prefix + payload + b'junk')) == 'abcd\uabcd'
    True
    >>> read_unicodestring8(io.BytesIO(prefix + payload[:-1]))
    Traceback (most recent call last):
    ...
    ValueError: expected 7 bytes in a unicodestring8, but only 6 remain
    """
    size = read_uint8(f)
    assert size >= 0
    # Guard against counts too large to address on this platform.
    if size > sys.maxsize:
        raise ValueError("unicodestring8 byte count > sys.maxsize: %d" % size)
    payload = f.read(size)
    if len(payload) != size:
        raise ValueError("expected %d bytes in a unicodestring8, but only %d "
                         "remain" % (size, len(payload)))
    # 'surrogatepass' lets lone surrogates in the pickle round-trip.
    return str(payload, 'utf-8', 'surrogatepass')
unicodestring8 = ArgumentDescriptor(
name="unicodestring8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_unicodestring8,
doc="""A counted Unicode string.
The first argument is a 8-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
    r"""Read a newline-terminated decimal integer (no trailing 'L').

    >>> import io
    >>> read_decimalnl_short(io.BytesIO(b"1234\n56"))
    1234
    >>> read_decimalnl_short(io.BytesIO(b"1234L\n56"))
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 10: b'1234L'
    """
    raw = read_stringnl(f, decode=False, stripquotes=False)
    # Hack: protocol 0 spells True as "01" and False as "00" via INT.
    if raw == b"01":
        return True
    if raw == b"00":
        return False
    return int(raw)
def read_decimalnl_long(f):
    r"""Read a newline-terminated decimal integer, ignoring a trailing 'L'.

    >>> import io
    >>> read_decimalnl_long(io.BytesIO(b"1234L\n56"))
    1234
    >>> read_decimalnl_long(io.BytesIO(b"123456789012345678901234L\n6"))
    123456789012345678901234
    """
    raw = read_stringnl(f, decode=False, stripquotes=False)
    if raw.endswith(b'L'):
        raw = raw[:-1]
    return int(raw)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
    r"""Read a newline-terminated decimal float literal.

    >>> import io
    >>> read_floatnl(io.BytesIO(b"-1.25\n6"))
    -1.25
    """
    return float(read_stringnl(f, decode=False, stripquotes=False))
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or on some boxes even
on itself (e.g., Windows can't read the strings it produces
for infinities or NaNs).
""")
def read_float8(f):
    r"""Read an 8-byte big-endian double (struct format '>d').

    >>> import io, struct
    >>> raw = struct.pack(">d", -1.25)
    >>> raw
    b'\xbf\xf4\x00\x00\x00\x00\x00\x00'
    >>> read_float8(io.BytesIO(raw + b"\n"))
    -1.25
    """
    raw = f.read(8)
    if len(raw) != 8:
        raise ValueError("not enough data in stream to read float8")
    return _unpack(">d", raw)[0]
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and pickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
    r"""Read a 1-byte length prefix, then that many bytes of little-endian
    two's-complement long data.

    >>> import io
    >>> read_long1(io.BytesIO(b"\x00"))
    0
    >>> read_long1(io.BytesIO(b"\x02\xff\x00"))
    255
    >>> read_long1(io.BytesIO(b"\x02\xff\x7f"))
    32767
    >>> read_long1(io.BytesIO(b"\x02\x00\xff"))
    -256
    >>> read_long1(io.BytesIO(b"\x02\x00\x80"))
    -32768
    """
    size = read_uint1(f)
    payload = f.read(size)
    if len(payload) != size:
        raise ValueError("not enough data in stream to read long1")
    return decode_long(payload)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
    r"""Read a 4-byte signed size prefix, then that many bytes of
    little-endian 2's-complement long data.

    >>> import io
    >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x00"))
    255
    >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x7f"))
    32767
    >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\xff"))
    -256
    >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\x80"))
    -32768
    >>> read_long4(io.BytesIO(b"\x00\x00\x00\x00"))
    0
    """
    n = read_int4(f)
    if n < 0:
        raise ValueError("long4 byte count < 0: %d" % n)
    data = f.read(n)
    if len(data) != n:
        raise ValueError("not enough data in stream to read long4")
    return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the int 0, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
    """Descriptor for one kind of object the pickle machine's stack holds.

    Instances serve as a key holder used in the stack_before and
    stack_after attributes of OpcodeInfo descriptors.
    """

    __slots__ = (
        'name',    # name of descriptor record, for info only
        'obtype',  # type, or tuple of acceptable types, for this stack entry
        'doc',     # human-readable docs for this kind of stack object
    )

    def __init__(self, name, obtype, doc):
        assert isinstance(name, str)
        assert isinstance(obtype, type) or isinstance(obtype, tuple)
        if isinstance(obtype, tuple):
            # A tuple means "any of these types"; each member must be a type.
            for member in obtype:
                assert isinstance(member, type)
        assert isinstance(doc, str)
        self.name = name
        self.obtype = obtype
        self.doc = doc

    def __repr__(self):
        return self.name
pyint = pylong = StackObject(
name='int',
obtype=int,
doc="A Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, bool),
doc="A Python integer or boolean object.")
pybool = StackObject(
name='bool',
obtype=bool,
doc="A Python boolean object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pybytes_or_str = pystring = StackObject(
name='bytes_or_str',
obtype=(bytes, str),
doc="A Python bytes or (Unicode) string object.")
pybytes = StackObject(
name='bytes',
obtype=bytes,
doc="A Python bytes object.")
pyunicode = StackObject(
name='str',
obtype=str,
doc="A Python (Unicode) string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
pyset = StackObject(
name="set",
obtype=set,
doc="A Python set object.")
pyfrozenset = StackObject(
name="frozenset",
obtype=set,
doc="A Python frozenset object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many object are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
    """Descriptor for one pickle opcode: its code, argument, and stack effect."""

    __slots__ = (
        'name',          # symbolic name of opcode; a string
        'code',          # the one-character string identifying the opcode
                         # in a bytestream
        'arg',           # ArgumentDescriptor for the argument embedded in
                         # the bytestream, or None if the opcode takes none;
                         # arg.reader(s) decodes it, arg.doc describes it
        'stack_before',  # list of StackObjects: stack layout before the
                         # opcode runs
        'stack_after',   # list of StackObjects: stack layout after the
                         # opcode runs
        'proto',         # protocol number in which this opcode was
                         # introduced; an int
        'doc',           # human-readable docs for this opcode; a string
    )

    def __init__(self, name, code, arg,
                 stack_before, stack_after, proto, doc):
        assert isinstance(name, str)
        assert isinstance(code, str) and len(code) == 1
        assert arg is None or isinstance(arg, ArgumentDescriptor)
        assert isinstance(stack_before, list)
        assert all(isinstance(entry, StackObject) for entry in stack_before)
        assert isinstance(stack_after, list)
        assert all(isinstance(entry, StackObject) for entry in stack_after)
        assert isinstance(proto, int) and 0 <= proto <= pickle.HIGHEST_PROTOCOL
        assert isinstance(doc, str)
        self.name = name
        self.code = code
        self.arg = arg
        self.stack_before = stack_before
        self.stack_after = stack_after
        self.proto = proto
        self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
Another difference is due to that, when bool was introduced as a
distinct type in 2.3, builtin names True and False were also added to
2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pyint],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
unpickles to a Python long. There doesn't seem a real purpose to the
trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pybytes_or_str],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character. These are usually decoded into a str instance
using the encoding given to the Unpickler constructor. or the default,
'ASCII'. If the encoding given was 'bytes' however, they will be
decoded as bytes object instead.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian
signed int giving the number of bytes in the string, and the
second is that many bytes, which are taken literally as the string
content. These are usually decoded into a str instance using the
encoding given to the Unpickler constructor. or the default,
'ASCII'. If the encoding given was 'bytes' however, they will be
decoded as bytes object instead.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content. These are
usually decoded into a str instance using the encoding given to
the Unpickler constructor. or the default, 'ASCII'. If the
encoding given was 'bytes' however, they will be decoded as bytes
object instead.
"""),
# Bytes (protocol 3 only; older protocols don't support bytes at all)
I(name='BINBYTES',
code='B',
arg=bytes4,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes, and the second is that many bytes, which are
taken literally as the bytes content.
"""),
I(name='SHORT_BINBYTES',
code='C',
arg=bytes1,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes, and the second is that many bytes, which are taken
literally as the string content.
"""),
I(name='BINBYTES8',
code='\x8e',
arg=bytes8,
stack_before=[],
stack_after=[pybytes],
proto=4,
doc="""Push a Python bytes object.
There are two arguments: the first is a 8-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='SHORT_BINUNICODE',
code='\x8c',
arg=unicodestring1,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 1-byte little-endian signed int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE8',
code='\x8d',
arg=unicodestring8,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 8-byte little-endian signed int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a one-tuple out of the topmost item on the stack.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. In other
words:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a two-tuple out of the top two items on the stack.
This code pops two values off the stack and pushes a tuple of
length 2 whose items are those values back onto it. In other
words:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a three-tuple out of the top three items on the stack.
This code pops three values off the stack and pushes a tuple of
length 3 whose items are those values back onto it. In other
words:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Ways to build sets
I(name='EMPTY_SET',
code='\x8f',
arg=None,
stack_before=[],
stack_after=[pyset],
proto=4,
doc="Push an empty set."),
I(name='ADDITEMS',
code='\x90',
arg=None,
stack_before=[pyset, markobject, stackslice],
stack_after=[pyset],
proto=4,
doc="""Add an arbitrary number of items to an existing set.
The slice of the stack following the topmost markobject is taken as
a sequence of items, added to the set immediately under the topmost
markobject. Everything at and after the topmost markobject is popped,
leaving the mutated set at the top of the stack.
Stack before: ... pyset markobject item_1 ... item_n
Stack after: ... pyset
where pyset has been modified via pyset.add(item_i) = item_i for i in
1, 2, ..., n, and in that order.
"""),
# Way to build frozensets
I(name='FROZENSET',
code='\x91',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pyfrozenset],
proto=4,
doc="""Build a frozenset out of the topmost slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python frozenset, which single frozenset object replaces all
of the stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3
Stack after: ... frozenset({1, 2, 3})
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=uint4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte unsigned
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=uint4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
unsigned little-endian integer following.
"""),
I(name='MEMOIZE',
code='\x94',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=4,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write is the number of
elements currently present in the memo.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
I(name='STACK_GLOBAL',
code='\x93',
arg=None,
stack_before=[pyunicode, pyunicode],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If not isinstance(callable, type), REDUCE complains unless the
callable has been registered with the copyreg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug). If
__safe_for_unpickling__ doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
NOTE: the distinction between old-style and new-style classes does
not make sense in Python 3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug). See INST for the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
I(name='NEWOBJ_EX',
code='\x92',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[anyobject],
proto=4,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple and by a keyword argument dict
(the dict being the stack top). Call these cls and args. They are
popped off the stack, and the value returned by
cls.__new__(cls, *args, *kwargs) is pushed back onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Framing support.
I(name='FRAME',
code='\x95',
arg=uint8,
stack_before=[],
stack_after=[],
proto=4,
doc="""Indicate the beginning of a new frame.
The unpickler may use this opcode to safely prefetch data from its
underlying stream.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I

# Verify uniqueness of .name and .code members.
# Runs at import time so an inconsistent opcode table fails fast.
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
    if d.name in name2i:
        raise ValueError("repeated name %r at indices %d and %d" %
                         (d.name, name2i[d.name], i))
    if d.code in code2i:
        raise ValueError("repeated code %r at indices %d and %d" %
                         (d.code, code2i[d.code], i))
    name2i[d.name] = i
    code2i[d.code] = i
# Delete the loop temporaries so they don't leak into the module namespace.
del name2i, code2i, i, d

##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.

code2op = {}
for d in opcodes:
    code2op[d.code] = d
del d
def assure_pickle_consistency(verbose=False):
    """Cross-check our code2op table against pickle.py's opcode constants.

    Walks pickle.__all__ looking for names that look like opcode constants
    (ALL_CAPS bound to a 1-byte bytes value) and raises ValueError if
    pickle.py has an opcode we don't, disagrees on an opcode's name, or
    lacks an opcode we define.  Pass verbose=True to trace the comparison.
    """
    copy = code2op.copy()
    for name in pickle.__all__:
        if not re.match("[A-Z][A-Z0-9_]+$", name):
            if verbose:
                print("skipping %r: it doesn't look like an opcode name" % name)
            continue
        picklecode = getattr(pickle, name)
        if not isinstance(picklecode, bytes) or len(picklecode) != 1:
            if verbose:
                print(("skipping %r: value %r doesn't look like a pickle "
                       "code" % (name, picklecode)))
            continue
        # Compare in str space, matching how code2op is keyed.
        picklecode = picklecode.decode("latin-1")
        if picklecode in copy:
            if verbose:
                print("checking name %r w/ code %r for consistency" % (
                    name, picklecode))
            d = copy[picklecode]
            if d.name != name:
                raise ValueError("for pickle code %r, pickle.py uses name %r "
                                 "but we're using name %r" % (picklecode,
                                                              name,
                                                              d.name))
            # Forget this one. Any left over in copy at the end are a problem
            # of a different kind.
            del copy[picklecode]
        else:
            raise ValueError("pickle.py appears to have a pickle opcode with "
                             "name %r and code %r, but we don't" %
                             (name, picklecode))
    if copy:
        msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
        for code, d in copy.items():
            msg.append("    name %r with code %r" % (d.name, code))
        raise ValueError("\n".join(msg))

# Run the consistency check once at import time, then drop the helper.
assure_pickle_consistency()
del assure_pickle_consistency
##############################################################################
# A pickle opcode generator.
def _genops(data, yield_end_pos=False):
    """Yield (opcode, arg, pos) — or (opcode, arg, pos, end_pos) when
    yield_end_pos is true — for each opcode in *data*, stopping after STOP.

    *data* may be a bytes-like object or a file-like object positioned at
    the start of a pickle.  pos entries are None when the stream cannot
    report its position.
    """
    if isinstance(data, bytes_types):
        data = io.BytesIO(data)

    # Fall back to position None for streams without tell().
    getpos = data.tell if hasattr(data, "tell") else (lambda: None)

    while True:
        pos = getpos()
        code = data.read(1)
        opcode = code2op.get(code.decode("latin-1"))
        if opcode is None:
            if code == b"":
                raise ValueError("pickle exhausted before seeing STOP")
            raise ValueError("at position %s, opcode %r unknown" % (
                "<unknown>" if pos is None else pos,
                code))
        # Decode the embedded argument, if this opcode carries one.
        arg = None if opcode.arg is None else opcode.arg.reader(data)
        if yield_end_pos:
            yield opcode, arg, pos, getpos()
        else:
            yield opcode, arg, pos
        if code == b'.':
            assert opcode.name == 'STOP'
            break
def genops(pickle):
    """Generate all the opcodes in a pickle.

    'pickle' is a file-like object, or string, containing the pickle.

    Each opcode in the pickle is generated, from the current pickle position,
    stopping after a STOP opcode is delivered.  A triple is generated for
    each opcode:

        opcode, arg, pos

    opcode is an OpcodeInfo record, describing the current opcode.

    If the opcode has an argument embedded in the pickle, arg is its decoded
    value, as a Python object.  If the opcode doesn't have an argument, arg
    is None.

    If the pickle has a tell() method, pos was the value of pickle.tell()
    before reading the current opcode.  If the pickle is a bytes object,
    it's wrapped in a BytesIO object, and the latter's tell() result is
    used.  Else (the pickle doesn't have a tell(), and it's not obvious how
    to query its current position) pos is None.
    """
    # Thin public wrapper: _genops additionally supports yielding end
    # positions, which optimize() uses internally.
    return _genops(pickle)
##############################################################################
# A pickle optimizer.
def optimize(p):
    """Optimize a pickle string by removing unused PUT opcodes.

    A PUT stores the stack top in the memo and is only useful if a later
    GET-family opcode reads that memo slot.  One pass over *p* records
    the memo keys actually fetched; a second pass copies every opcode
    except PUTs whose keys are never read.  Existing FRAME opcodes are
    dropped and re-emitted by the framer for protocol >= 4.
    Returns the optimized pickle as bytes.
    """
    not_a_put = object()
    gets = {not_a_put}    # set of args used by a GET opcode
    opcodes = []          # (startpos, stoppos, putid)
    proto = 0
    for opcode, arg, pos, end_pos in _genops(p, yield_end_pos=True):
        if 'PUT' in opcode.name:
            opcodes.append((pos, end_pos, arg))
        elif 'FRAME' in opcode.name:
            # Skip existing frames; fresh framing is emitted below.
            pass
        else:
            if 'GET' in opcode.name:
                gets.add(arg)
            elif opcode.name == 'PROTO':
                assert pos == 0, pos
                proto = arg
            # Non-PUT opcodes carry the sentinel id, which is always in
            # `gets`, so they are always copied.
            opcodes.append((pos, end_pos, not_a_put))
        # (the original also kept a `prevpos, prevarg` pair here that was
        # never read; removed as dead code)

    # Copy the opcodes except for PUTs without a corresponding GET.
    out = io.BytesIO()
    opcodes = iter(opcodes)
    if proto >= 2:
        # Write the PROTO header before any framing.
        start, stop, _ = next(opcodes)
        out.write(p[start:stop])
    buf = pickle._Framer(out.write)
    if proto >= 4:
        buf.start_framing()
    for start, stop, putid in opcodes:
        if putid in gets:
            buf.commit_frame()
        buf.write(p[start:stop])
    if proto >= 4:
        buf.end_framing()
    return out.getvalue()
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4, annotate=0):
    """Produce a symbolic disassembly of a pickle.

    'pickle' is a file-like object, or string, containing a (at least one)
    pickle.  The pickle is disassembled from the current position, through
    the first STOP opcode encountered.

    Optional arg 'out' is a file-like object to which the disassembly is
    printed.  It defaults to sys.stdout.

    Optional arg 'memo' is a Python dict, used as the pickle's memo.  It
    may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
    Passing the same memo object to another dis() call then allows disassembly
    to proceed across multiple pickles that were all created by the same
    pickler with the same memo.  Ordinarily you don't need to worry about this.

    Optional arg 'indentlevel' is the number of blanks by which to indent
    a new MARK level.  It defaults to 4.

    Optional arg 'annotate' if nonzero instructs dis() to add short
    description of the opcode on each line of disassembled output.
    The value given to 'annotate' must be an integer and is used as a
    hint for the column where annotation should start.  The default
    value is 0, meaning no annotations.

    In addition to printing the disassembly, some sanity checks are made:

    + All embedded opcode arguments "make sense".

    + Explicit and implicit pop operations have enough items on the stack.

    + When an opcode implicitly refers to a markobject, a markobject is
      actually on the stack.

    + A memo entry isn't referenced before it's defined.

    + The markobject isn't stored in the memo.

    + A memo entry isn't redefined.
    """
    # Most of the hair here is for sanity checks, but most of it is needed
    # anyway to detect when a protocol 0 POP takes a MARK off the stack
    # (which in turn is needed to indent MARK blocks correctly).

    stack = []          # crude emulation of unpickler stack
    if memo is None:
        memo = {}       # crude emulation of unpickler memo
    maxproto = -1       # max protocol number seen
    markstack = []      # bytecode positions of MARK opcodes
    indentchunk = ' ' * indentlevel
    errormsg = None
    annocol = annotate  # column hint for annotations
    for opcode, arg, pos in genops(pickle):
        if pos is not None:
            print("%5d:" % pos, end=' ', file=out)

        # Opcode character, MARK-level indentation, then the opcode name.
        line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
                              indentchunk * len(markstack),
                              opcode.name)

        maxproto = max(maxproto, opcode.proto)
        before = opcode.stack_before    # don't mutate
        after = opcode.stack_after      # don't mutate
        numtopop = len(before)

        # See whether a MARK should be popped.
        markmsg = None
        if markobject in before or (opcode.name == "POP" and
                                    stack and
                                    stack[-1] is markobject):
            assert markobject not in after
            if __debug__:
                if markobject in before:
                    assert before[-1] is stackslice
            if markstack:
                markpos = markstack.pop()
                if markpos is None:
                    markmsg = "(MARK at unknown opcode offset)"
                else:
                    markmsg = "(MARK at %d)" % markpos
                # Pop everything at and after the topmost markobject.
                while stack[-1] is not markobject:
                    stack.pop()
                stack.pop()
                # Stop later code from popping too much.
                try:
                    numtopop = before.index(markobject)
                except ValueError:
                    assert opcode.name == "POP"
                    numtopop = 0
            else:
                errormsg = markmsg = "no MARK exists on stack"

        # Check for correct memo usage.
        if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT", "MEMOIZE"):
            if opcode.name == "MEMOIZE":
                # MEMOIZE has no explicit arg; it uses the next free slot.
                memo_idx = len(memo)
            else:
                assert arg is not None
                memo_idx = arg
            if memo_idx in memo:
                errormsg = "memo key %r already defined" % arg
            elif not stack:
                errormsg = "stack is empty -- can't store into memo"
            elif stack[-1] is markobject:
                errormsg = "can't store markobject in the memo"
            else:
                memo[memo_idx] = stack[-1]
        elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
            if arg in memo:
                assert len(after) == 1
                after = [memo[arg]]     # for better stack emulation
            else:
                errormsg = "memo key %r has never been stored into" % arg

        if arg is not None or markmsg:
            # make a mild effort to align arguments
            line += ' ' * (10 - len(opcode.name))
            if arg is not None:
                line += ' ' + repr(arg)
            if markmsg:
                line += ' ' + markmsg
        if annotate:
            line += ' ' * (annocol - len(line))
            # make a mild effort to align annotations
            annocol = len(line)
            if annocol > 50:
                # Annotation drifted too far right; reset to the hint.
                annocol = annotate
            # First line of the opcode's doc serves as the short description.
            line += ' ' + opcode.doc.split('\n', 1)[0]
        print(line, file=out)

        if errormsg:
            # Note that we delayed complaining until the offending opcode
            # was printed.
            raise ValueError(errormsg)

        # Emulate the stack effects.
        if len(stack) < numtopop:
            raise ValueError("tries to pop %d items from stack with "
                             "only %d items" % (numtopop, len(stack)))
        if numtopop:
            del stack[-numtopop:]
        if markobject in after:
            assert markobject not in before
            markstack.append(pos)

        stack.extend(after)

    print("highest protocol among opcodes =", maxproto, file=out)
    if stack:
        raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
    """Tiny pickleable class used by the doctests below."""
    def __init__(self, value):
        # Single piece of state; round-trips through pickle's BUILD opcode.
        self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {b'abc': "def"}]
>>> pkl0 = pickle.dumps(x, 0)
>>> dis(pkl0)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 1
9: a APPEND
10: L LONG 2
14: a APPEND
15: ( MARK
16: L LONG 3
20: L LONG 4
24: t TUPLE (MARK at 15)
25: p PUT 1
28: a APPEND
29: ( MARK
30: d DICT (MARK at 29)
31: p PUT 2
34: c GLOBAL '_codecs encode'
50: p PUT 3
53: ( MARK
54: V UNICODE 'abc'
59: p PUT 4
62: V UNICODE 'latin1'
70: p PUT 5
73: t TUPLE (MARK at 53)
74: p PUT 6
77: R REDUCE
78: p PUT 7
81: V UNICODE 'def'
86: p PUT 8
89: s SETITEM
90: a APPEND
91: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl1 = pickle.dumps(x, 1)
>>> dis(pkl1)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: c GLOBAL '_codecs encode'
35: q BINPUT 3
37: ( MARK
38: X BINUNICODE 'abc'
46: q BINPUT 4
48: X BINUNICODE 'latin1'
59: q BINPUT 5
61: t TUPLE (MARK at 37)
62: q BINPUT 6
64: R REDUCE
65: q BINPUT 7
67: X BINUNICODE 'def'
75: q BINPUT 8
77: s SETITEM
78: e APPENDS (MARK at 3)
79: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: c GLOBAL 'copy_reg _reconstructor'
30: p PUT 1
33: ( MARK
34: c GLOBAL 'pickletools _Example'
56: p PUT 2
59: c GLOBAL '__builtin__ object'
79: p PUT 3
82: N NONE
83: t TUPLE (MARK at 33)
84: p PUT 4
87: R REDUCE
88: p PUT 5
91: ( MARK
92: d DICT (MARK at 91)
93: p PUT 6
96: V UNICODE 'value'
103: p PUT 7
106: L LONG 42
111: s SETITEM
112: b BUILD
113: a APPEND
114: g GET 5
117: a APPEND
118: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: c GLOBAL 'copy_reg _reconstructor'
29: q BINPUT 1
31: ( MARK
32: c GLOBAL 'pickletools _Example'
54: q BINPUT 2
56: c GLOBAL '__builtin__ object'
76: q BINPUT 3
78: N NONE
79: t TUPLE (MARK at 31)
80: q BINPUT 4
82: R REDUCE
83: q BINPUT 5
85: } EMPTY_DICT
86: q BINPUT 6
88: X BINUNICODE 'value'
98: q BINPUT 7
100: K BININT1 42
102: s SETITEM
103: b BUILD
104: h BINGET 5
106: e APPENDS (MARK at 3)
107: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
Try protocol 3 with annotations:
>>> dis(pickle.dumps(T, 3), annotate=1)
0: \x80 PROTO 3 Protocol version indicator.
2: ] EMPTY_LIST Push an empty list.
3: q BINPUT 0 Store the stack top into the memo. The stack is not popped.
5: h BINGET 0 Read an object from the memo and push it on the stack.
7: \x85 TUPLE1 Build a one-tuple out of the topmost item on the stack.
8: q BINPUT 1 Store the stack top into the memo. The stack is not popped.
10: a APPEND Append an object to a list.
11: 0 POP Discard the top stack item, shrinking the stack by one item.
12: h BINGET 1 Read an object from the memo and push it on the stack.
14: . STOP Stop the unpickling machine.
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> import io
>>> f = io.BytesIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
0
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
# Register the doctest strings so doctest.testmod() discovers them.
__test__ = {'disassembler_test': _dis_test,
            'disassembler_memo_test': _memo_test,
            }
def _test():
    """Run this module's doctests; return doctest.testmod()'s result."""
    import doctest
    return doctest.testmod()
# Command-line interface: disassemble one or more pickle files, or run the
# module's self-tests.
if __name__ == "__main__":
    import sys, argparse

    parser = argparse.ArgumentParser(
        description='disassemble one or more pickle files')
    parser.add_argument(
        'pickle_file', type=argparse.FileType('br'),
        nargs='*', help='the pickle file')
    parser.add_argument(
        '-o', '--output', default=sys.stdout, type=argparse.FileType('w'),
        help='the file where the output should be written')
    parser.add_argument(
        '-m', '--memo', action='store_true',
        help='preserve memo between disassemblies')
    parser.add_argument(
        '-l', '--indentlevel', default=4, type=int,
        help='the number of blanks by which to indent a new MARK level')
    parser.add_argument(
        '-a', '--annotate', action='store_true',
        help='annotate each line with a short opcode description')
    parser.add_argument(
        '-p', '--preamble', default="==> {name} <==",
        help='if more than one pickle file is specified, print this before'
        ' each disassembly')
    parser.add_argument(
        '-t', '--test', action='store_true',
        help='run self-test suite')
    parser.add_argument(
        '-v', action='store_true',
        help='run verbosely; only affects self-test run')
    args = parser.parse_args()
    if args.test:
        # NOTE(review): args.v is parsed but never forwarded to _test(), so
        # -v currently has no effect -- confirm whether that is intended.
        _test()
    else:
        annotate = 30 if args.annotate else 0
        if not args.pickle_file:
            parser.print_help()
        elif len(args.pickle_file) == 1:
            dis(args.pickle_file[0], args.output, None,
                args.indentlevel, annotate)
        else:
            # Multiple files: share one memo if -m was given, and print a
            # preamble line before each disassembly.
            memo = {} if args.memo else None
            for f in args.pickle_file:
                preamble = args.preamble.format(name=f.name)
                args.output.write(preamble + '\n')
                dis(f, args.output, memo, args.indentlevel, annotate)
|
jessemillar/pythonista | refs/heads/master | Desktop/vssh.py | 1 | """Find the IP address of a VirtualBox virtual machine and ssh into it.\
Add or update /etc/hosts entries upon user interaction."""
import os
import subprocess
import sys
def check_exists(name):
    """Check if the virtual machine exists."""
    listing = subprocess.Popen(["VBoxManage", "list", "vms"],
                               stdout=subprocess.PIPE)
    # The membership test already yields the True/False we want.
    return name in listing.communicate()[0]
def check_up(name):
    """Check if the virtual machine is currently powered on."""
    running = subprocess.Popen(["VBoxManage", "list", "runningvms"],
                               stdout=subprocess.PIPE)
    # The membership test already yields the True/False we want.
    return name in running.communicate()[0]
def find_host(name):
    """Check if an entry for *name* already exists in /etc/hosts.

    Returns True if any line of /etc/hosts contains the name.
    """
    # "with" guarantees the file handle is closed (the original leaked it).
    with open("/etc/hosts", "r") as hosts:
        for line in hosts:
            if name in line:
                return True
    return False
def host_outdated(address, name):
    """Check if the /etc/hosts entry for the virtual machine is outdated.

    Returns True when a line mentions *name* but not *address*, i.e. the
    host name maps to a different IP.
    """
    # "with" guarantees the file handle is closed (the original leaked it).
    with open("/etc/hosts", "r") as hosts:
        for line in hosts:
            if name in line:
                if address not in line:
                    return True
    return False
def add_host(address, name):
    """Add an entry in /etc/hosts for the virtual machine.

    Writes a new "address<TAB>name" line to a temp copy, then moves it into
    place with sudo.
    """
    with open("/etc/hosts", "rt") as hosts:
        hosts_contents = hosts.read() + "\n" + address + "\t" + name + "\n"

    # Close (flush) the temp file BEFORE the mv; the original left it open,
    # so buffered data could be lost and a truncated file installed.
    with open("/tmp/etc_hosts.tmp", "wt") as temp_hosts:
        temp_hosts.write(hosts_contents)

    # Move the temp hosts file into place with sudo permissions
    os.system("sudo mv /tmp/etc_hosts.tmp /etc/hosts")
def update_host(address, name):
    """Update the /etc/hosts entry for *name* to point at *address*.

    Rewrites every line mentioning the name, keeps all other lines, and
    installs the result via a sudo move.
    """
    with open("/etc/hosts", "r") as hosts:
        data = hosts.readlines()

    new_hosts = []
    for line in data:
        if name in line:
            new_hosts.append(address + "\t" + name + "\n")
        else:
            new_hosts.append(line)

    # Close (flush) the temp file BEFORE the mv; the original left it open,
    # so buffered data could be lost and a truncated file installed.
    with open("/tmp/etc_hosts.tmp", "wt") as temp_hosts:
        temp_hosts.writelines(new_hosts)

    # Move the temp hosts file into place with sudo permissions
    os.system("sudo mv /tmp/etc_hosts.tmp /etc/hosts")
def main(): # Define as a function to adhere to style guidelines
    """Where the magic happens.

    Usage: vssh.py "<vm name>".  Looks up the VM's IP via VirtualBox guest
    properties, offers to sync /etc/hosts, then execs ssh to the address.
    """
    # Require the VM name as the first argument.
    try:
        sys.argv[1]
    except IndexError:
        print "Missing name of virtual machine"
        return
    # Check if the user is supplying the virtual machine's name correctly
    try:
        sys.argv[2]
    # If the name is correct, run the program
    except IndexError:
        if not check_exists(sys.argv[1]):
            print "The specified virtual machine does not appear to exist."
            return
        if not check_up(sys.argv[1]):
            headless_input = raw_input("The specified virtual machine does not appear to be running. Would you like to start the machine in 'headless' mode? [Y/n] ")
            if len(headless_input) == 0 or headless_input == "Y" or headless_input == "y": # If the user responds in the affirmative
                subprocess.Popen(["VBoxManage", "startvm", sys.argv[1], "--type", "headless"], stdout=subprocess.PIPE)
                print "Please wait for the machine to boot before trying to connect again."
                return
            else:
                return
        # Ask VirtualBox guest properties for the VM's first IPv4 address.
        virtualbox_ip = subprocess.Popen(["VBoxManage", "guestproperty", "get", sys.argv[1], "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=subprocess.PIPE)
        ip_response = virtualbox_ip.communicate()[0]
        if ip_response == "No value set!\n":
            print "Could not find the virtual machine's IP address. Are network settings configured correctly and are VirtualBox Guest additions installed on the virtual machine?"
            return
        # Offer to add or refresh the /etc/hosts entry before connecting.
        # ip_response looks like "Value: <ip>", so split()[1] is the address.
        if find_host(sys.argv[1]):
            if host_outdated(ip_response.split()[1], sys.argv[1]):
                hosts_input = raw_input("/etc/hosts has an outdated entry for this virtual machine. Would you like to update it? [Y/n] ")
                if len(hosts_input) == 0 or hosts_input == "Y" or hosts_input == "y": # If the user responds in the affirmative
                    update_host(ip_response.split()[1], sys.argv[1])
        else:
            hosts_input = raw_input("/etc/hosts does not have an entry for this virtual machine. Would you like to add one? [Y/n] ")
            if len(hosts_input) == 0 or hosts_input == "Y" or hosts_input == "y": # If the user responds in the affirmative
                add_host(ip_response.split()[1], sys.argv[1])
        os.system("ssh " + ip_response.split()[1])
    # Extra positional args mean an unquoted multi-word VM name.
    else:
        print "If your virtual machine's name contains spaces, please wrap it in quotes."
        return

main() # Run the function so the module is useful in a CLI
|
BeATz-UnKNoWN/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/test_keywordonlyarg.py | 49 | #!/usr/bin/env python3
"""Unit tests for the keyword only argument specified in PEP 3102."""
__author__ = "Jiwon Seo"
__email__ = "seojiwon at gmail dot com"
import unittest
from test.support import run_unittest
def posonly_sum(pos_arg1, *arg, **kwarg):
    """Return pos_arg1 plus the sum of all extra positional/keyword values."""
    extras = sum(arg) + sum(kwarg.values())
    return pos_arg1 + extras
def keywordonly_sum(*, k1=0, k2):
    """Sum two keyword-only arguments; k1 defaults to 0, k2 is required."""
    result = k1 + k2
    return result
def keywordonly_nodefaults_sum(*, k1, k2):
    """Sum two required keyword-only arguments."""
    total = k1 + k2
    return total
def keywordonly_and_kwarg_sum(*, k1, k2, **kwarg):
    """Sum the two required keyword-only args plus any extra keyword values."""
    extras = sum(kwarg.values())
    return k1 + k2 + extras
def mixedargs_sum(a, b=0, *arg, k1, k2=0):
    """Sum two positionals, any extra positionals, and two keyword-only args."""
    base = a + b + sum(arg)
    return base + k1 + k2
def mixedargs_sum2(a, b=0, *arg, k1, k2=0, **kwargs):
    """Sum positionals, extras, keyword-only args, and extra keyword values."""
    positional_total = a + b + sum(arg)
    keyword_total = k1 + k2 + sum(kwargs.values())
    return positional_total + keyword_total
def sortnum(*nums, reverse=False):
    """Return the given numbers as a sorted list.

    reverse -- keyword-only; sort in descending order when true.
    """
    # sorted() accepts any iterable, so wrapping the tuple in list() first
    # was redundant.
    return sorted(nums, reverse=reverse)
def sortwords(*words, reverse=False, **kwargs):
    """Return the given words as a sorted list.

    reverse -- keyword-only; sort in descending order when true.
    Extra keyword arguments are accepted for signature compatibility and
    ignored.
    """
    # sorted() accepts any iterable, so wrapping the tuple in list() first
    # was redundant.
    return sorted(words, reverse=reverse)
class Foo:
    """Helper exercising keyword-only parameters on __init__ and methods."""
    def __init__(self, *, k1, k2=0):
        # k1 is a required keyword-only argument; k2 is optional.
        self.k1 = k1
        self.k2 = k2
    def set(self, p1, *, k1, k2):
        # p1 is positional and unused; both keyword-only args are required.
        self.k1 = k1
        self.k2 = k2
    def sum(self):
        # Returns k1 + k2 (method name shadows the builtin only as an attr).
        return self.k1 + self.k2
class KeywordOnlyArgTestCase(unittest.TestCase):
    """Tests for PEP 3102 keyword-only argument syntax and runtime semantics."""
    def assertRaisesSyntaxError(self, codestr):
        # Helper: compiling *codestr* must raise SyntaxError.
        def shouldRaiseSyntaxError(s):
            compile(s, "<test>", "single")
        self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
    def testSyntaxErrorForFunctionDefinition(self):
        # Malformed keyword-only parameter lists are compile-time errors:
        # bare '*' with nothing after it, duplicate names, '**' directly
        # after '*', and tuple-unpack parameters.
        self.assertRaisesSyntaxError("def f(p, *):\n pass\n")
        self.assertRaisesSyntaxError("def f(p1, *, p1=100):\n pass\n")
        self.assertRaisesSyntaxError("def f(p1, *k1, k1=100):\n pass\n")
        self.assertRaisesSyntaxError("def f(p1, *, k1, k1=100):\n pass\n")
        self.assertRaisesSyntaxError("def f(p1, *, **k1):\n pass\n")
        self.assertRaisesSyntaxError("def f(p1, *, k1, **k1):\n pass\n")
        self.assertRaisesSyntaxError("def f(p1, *, None, **k1):\n pass\n")
        self.assertRaisesSyntaxError("def f(p, *, (k1, k2), **kw):\n pass\n")
    def testSyntaxForManyArguments(self):
        # On this interpreter version, more than 255 parameters is rejected
        # at compile time; exactly 255 must still compile.
        fundef = "def f("
        for i in range(255):
            fundef += "i%d, "%i
        fundef += "*, key=100):\n pass\n"
        self.assertRaisesSyntaxError(fundef)
        fundef2 = "def foo(i,*,"
        for i in range(255):
            fundef2 += "i%d, "%i
        fundef2 += "lastarg):\n pass\n"
        self.assertRaisesSyntaxError(fundef2)
        # exactly 255 arguments, should compile ok
        fundef3 = "def f(i,*,"
        for i in range(253):
            fundef3 += "i%d, "%i
        fundef3 += "lastarg):\n pass\n"
        compile(fundef3, "<test>", "single")
    def testTooManyPositionalErrorMessage(self):
        def f(a, b=None, *, c=None):
            pass
        with self.assertRaises(TypeError) as exc:
            f(1, 2, 3)
        # NOTE: the exact message text is specific to this CPython version.
        expected = "f() takes at most 2 positional arguments (3 given)"
        self.assertEqual(str(exc.exception), expected)
    def testSyntaxErrorForFunctionCall(self):
        # Positional after keyword, and duplicate keyword args, are errors.
        self.assertRaisesSyntaxError("f(p, k=1, p2)")
        self.assertRaisesSyntaxError("f(p, k1=50, *(1,2), k1=100)")
    def testRaiseErrorFuncallWithUnexpectedKeywordArgument(self):
        # Missing mandatory keyword-only args, or unknown keywords, raise
        # TypeError at call time.
        self.assertRaises(TypeError, keywordonly_sum, ())
        self.assertRaises(TypeError, keywordonly_nodefaults_sum, ())
        self.assertRaises(TypeError, Foo, ())
        try:
            keywordonly_sum(k2=100, non_existing_arg=200)
            self.fail("should raise TypeError")
        except TypeError:
            pass
        try:
            keywordonly_nodefaults_sum(k2=2)
            self.fail("should raise TypeError")
        except TypeError:
            pass
    def testFunctionCall(self):
        # Happy-path calls through every module-level fixture function.
        self.assertEqual(1, posonly_sum(1))
        self.assertEqual(1+2, posonly_sum(1,**{"2":2}))
        self.assertEqual(1+2+3, posonly_sum(1,*(2,3)))
        self.assertEqual(1+2+3+4, posonly_sum(1,*(2,3),**{"4":4}))
        self.assertEqual(1, keywordonly_sum(k2=1))
        self.assertEqual(1+2, keywordonly_sum(k1=1, k2=2))
        self.assertEqual(1+2, keywordonly_and_kwarg_sum(k1=1, k2=2))
        self.assertEqual(1+2+3, keywordonly_and_kwarg_sum(k1=1, k2=2, k3=3))
        self.assertEqual(1+2+3+4,
                         keywordonly_and_kwarg_sum(k1=1, k2=2,
                                                   **{"a":3,"b":4}))
        self.assertEqual(1+2, mixedargs_sum(1, k1=2))
        self.assertEqual(1+2+3, mixedargs_sum(1, 2, k1=3))
        self.assertEqual(1+2+3+4, mixedargs_sum(1, 2, k1=3, k2=4))
        self.assertEqual(1+2+3+4+5, mixedargs_sum(1, 2, 3, k1=4, k2=5))
        self.assertEqual(1+2, mixedargs_sum2(1, k1=2))
        self.assertEqual(1+2+3, mixedargs_sum2(1, 2, k1=3))
        self.assertEqual(1+2+3+4, mixedargs_sum2(1, 2, k1=3, k2=4))
        self.assertEqual(1+2+3+4+5, mixedargs_sum2(1, 2, 3, k1=4, k2=5))
        self.assertEqual(1+2+3+4+5+6,
                         mixedargs_sum2(1, 2, 3, k1=4, k2=5, k3=6))
        self.assertEqual(1+2+3+4+5+6,
                         mixedargs_sum2(1, 2, 3, k1=4, **{'k2':5, 'k3':6}))
        self.assertEqual(1, Foo(k1=1).sum())
        self.assertEqual(1+2, Foo(k1=1,k2=2).sum())
        self.assertEqual([1,2,3], sortnum(3,2,1))
        self.assertEqual([3,2,1], sortnum(1,2,3, reverse=True))
        self.assertEqual(['a','b','c'], sortwords('a','c','b'))
        self.assertEqual(['c','b','a'], sortwords('a','c','b', reverse=True))
        self.assertEqual(['c','b','a'],
                         sortwords('a','c','b', reverse=True, ignore='ignore'))
    def testKwDefaults(self):
        def foo(p1,p2=0, *, k1, k2=0):
            return p1 + p2 + k1 + k2
        # Keyword-only defaults live in __kwdefaults__, not __defaults__.
        self.assertEqual(2, foo.__code__.co_kwonlyargcount)
        self.assertEqual({"k2":0}, foo.__kwdefaults__)
        # Replacing __kwdefaults__ changes which args are mandatory: k2 now
        # has no default, so omitting it must raise TypeError.
        foo.__kwdefaults__ = {"k1":0}
        try:
            foo(1,k1=10)
            self.fail("__kwdefaults__ is not properly changed")
        except TypeError:
            pass
    def test_kwonly_methods(self):
        # Keyword-only args work on bound and unbound method calls alike;
        # calling without the positional self is still an error.
        class Example:
            def f(self, *, k1=1, k2=2):
                return k1, k2
        self.assertEqual(Example().f(k1=1, k2=2), (1, 2))
        self.assertEqual(Example.f(Example(), k1=1, k2=2), (1, 2))
        self.assertRaises(TypeError, Example.f, k1=1, k2=2)
def test_main():
    """Entry point used by regrtest / test.support to run this module's tests."""
    run_unittest(KeywordOnlyArgTestCase)
if __name__ == "__main__":
    # Allow running this test file directly.
    test_main()
|
ewbankkit/cloud-custodian | refs/heads/master | tools/sandbox/c7n_sphere11/c7n_sphere11/admin.py | 6 | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import json
import pprint
import re
import time
import boto3
import click
import tabulate
import yaml
from c7n_sphere11.cli import BASE_URL
from c7n_sphere11.client import Client
from c7n.utils import local_session
# Root command group for the sphere11 admin CLI; subcommands below register
# themselves via @admin.command().  The docstring is click's help text.
@click.group()
def admin():
    """Sphere11, resource locks"""
@admin.command()
@click.option('--config')
def format_json(config):
    """format config for lambda exec
    """
    # Read a YAML config file and print it as pretty JSON suitable for
    # pasting into a lambda's configuration input.
    with open(config) as fh:
        print(json.dumps(yaml.safe_load(fh.read()), indent=2))
def render_metrics(header, values):
    """Echo one metric line: padded header, min/max summary and a sparkline.

    Metrics with no datapoints are skipped silently.
    """
    if not values:
        return
    parts = [
        " ",
        header.ljust(20),
        ("min:%0.1f" % min(values)).ljust(12),
        ("max:%0.1f" % max(values)).ljust(12),
        raster_metrics(values),
    ]
    click.echo("".join(parts))
def raster_metrics(data):
    """Render a sequence of numbers as a one-line unicode sparkline.

    Each value is mapped to one of 8 bar glyphs according to its position
    between min(data) and max(data).
    """
    BARS = u'▁▂▃▄▅▆▇█'
    low = min(data)
    span = max(data) - low
    if not span:
        # All values identical: the old half-open bin search (thres <= n <
        # thres + width) matched nothing when width was 0 and rendered an
        # empty string; draw a flat baseline instead.
        return BARS[0] * len(data)
    # float() guards against Python 2 integer division on int datapoints.
    width = span / float(len(BARS) - 1)
    # Equivalent to the original bin search: bucket index is the floor of
    # (n - low) / width, clamped so n == max lands on the last glyph.
    indexes = [min(int((n - low) / width), len(BARS) - 1) for n in data]
    return ''.join(BARS[i] for i in indexes)
@admin.command()
def check():
    """Sanity check api deployment
    """
    # Round-trip the version endpoint and report latency, headers and body.
    t = time.time()
    results = Client(BASE_URL).version()
    print("Endpoint", BASE_URL)
    print("Response Time %0.2f" % (time.time() - t))
    print("Headers")
    for k, v in results.headers.items():
        print(" %s: %s" % (k, v))
    print("Body")
    print(results.text)
@admin.command()
@click.option('--function', help='function name', required=True)
@click.option('--api', help='api name')
@click.option(
    '-s', '--start', help='relative time to start from', default="1h")
@click.option(
    '-p', '--period', help='metrics period', default="1m")
def metrics(function, api, start, period):
    """lambda/api/db metrics"""
    from c7n.mu import LambdaManager
    manager = LambdaManager(boto3.Session)
    # Convert relative specs ("1h", "1m") to an absolute start time and a
    # period in whole seconds.
    start = parse_date(start)
    period = int(abs(parse_timedelta(period).total_seconds()))
    print("Lambda Metrics")
    metrics = manager.metrics(
        [{'FunctionName': function}],
        start=start, end=datetime.utcnow(),
        period=period)
    for k in ('Invocations', 'Throttles', 'Errors'):
        values = [n['Sum'] for n in metrics[0][k]]
        render_metrics(k, values)
    if not api:
        return
    print("Api Metrics")
    # Stage name is hard-coded to "latest".
    metrics = gateway_metrics(
        boto3.Session, api, "latest", start, datetime.utcnow(), period)
    for k, data in metrics.items():
        # Count-style metrics are rates (Sum); latency-style are Averages.
        if "Count" in k:
            values = [n['Sum'] for n in data]
        else:
            values = [n['Average'] for n in data]
        render_metrics(k, values)
    print("Db Metrics")
    # Table name is hard-coded to the dev locks table.
    metrics = db_metrics(
        boto3.Session, "Sphere11.Dev.ResourceLocks",
        start, datetime.utcnow(), period)
    for k, data in metrics.items():
        values = [n['Average'] for n in data]
        render_metrics(k, values)
def db_metrics(session_factory, table_name, start, end, period):
    """Collect DynamoDB CloudWatch datapoints for *table_name*.

    Returns a dict keyed by metric name with 'Capacity' stripped (so e.g.
    ConsumedReadCapacityUnits appears as ConsumedReadUnits), mapping to the
    list of Average datapoints between *start* and *end*.
    """
    client = local_session(session_factory).client('cloudwatch')
    metric_names = (
        "ConsumedReadCapacityUnits",
        "ConsumedWriteCapacityUnits",
        "ThrottledRequests",
        "ReadThrottleEvents",
        "WriteThrottleEvents",
        "ReturnedItemCount",
        "SuccessfulRequestLatency"
        # "ReturnedRecordsCount"
    )
    results = {}
    for name in metric_names:
        datapoints = client.get_metric_statistics(
            Namespace="AWS/DynamoDB",
            Dimensions=[
                {'Name': 'TableName', 'Value': table_name}
            ],
            Statistics=["Average"],
            StartTime=start,
            EndTime=end,
            Period=period,
            MetricName=name)['Datapoints']
        results[name.replace('Capacity', '')] = datapoints
    return results
def gateway_metrics(session_factory, gateway_id, stage_name, start, end, period):
    """Collect API Gateway CloudWatch datapoints for one API stage.

    Returns a dict mapping metric name to the list of datapoints (each with
    both Average and Sum statistics) between *start* and *end*.
    """
    metrics = local_session(session_factory).client('cloudwatch')
    values = {}
    # Metric names per the API Gateway CloudWatch metrics reference.
    # Fixed: "5XError" is not a real metric; the correct name is "5XXError",
    # so server-error counts previously always came back empty.
    for m in ("4XXError", "5XXError",
              "CacheHitCount", "CacheMissCount",
              "Count",
              "IntegrationLatency", "Latency"):
        values[m] = metrics.get_metric_statistics(
            Namespace="AWS/ApiGateway",
            Dimensions=[
                {'Name': 'ApiName', 'Value': gateway_id},
                {'Name': 'Stage', 'Value': stage_name},
            ],
            Statistics=["Average", "Sum"],
            StartTime=start,
            EndTime=end,
            Period=period,
            MetricName=m)['Datapoints']
    return values
def parse_timedelta(datetime_text, default=timedelta(seconds=60 * 5 * -1)):
    """Parse a relative-time spec like "30m", "2 hours" or "1 day ago" into
    a negative (past-pointing) timedelta.

    Adapted from the awslogs script.  When the text does not match, returns
    the negation of *default* (+5 minutes for the stock default).
    """
    # Fixed alternation typo: 'weeks' was listed twice and the singular
    # 'week' was missing.
    ago_regexp = r'(\d+)\s?(m|minute|minutes|h|hour|hours|d|day|days|w|week|weeks)(?: ago)?'
    ago_match = re.match(ago_regexp, datetime_text)

    if ago_match:
        amount, unit = ago_match.groups()
        amount = int(amount)
        # Only the first letter of the unit word matters (m/h/d/w).
        unit = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}[unit[0]]
        delta = timedelta(seconds=unit * amount * -1)
    else:
        # NOTE(review): negating the (already negative) default yields a
        # future-pointing delta for unparseable input; preserved as-is --
        # confirm this is intentional.
        delta = -default
    return delta
def parse_date(datetime_text):
    """Resolve a relative time spec (e.g. "1h") to an absolute UTC datetime."""
    offset = parse_timedelta(datetime_text)
    return datetime.utcnow() + offset
@admin.command()
@click.option('--account-id', help='account id')
def records(account_id):
    """Fetch locks data
    """
    # NOTE(review): account_id is accepted but never used, and the table
    # name is hard-coded -- confirm whether per-account filtering was
    # intended here.
    s = boto3.Session()
    table = s.resource('dynamodb').Table('Sphere11.Dev.ResourceLocks')
    results = table.scan()
    # Convert epoch timestamps to datetimes for readable tabulation.
    for r in results['Items']:
        if 'LockDate' in r:
            r['LockDate'] = datetime.fromtimestamp(r['LockDate'])
        if 'RevisionDate' in r:
            r['RevisionDate'] = datetime.fromtimestamp(r['RevisionDate'])
    print(tabulate.tabulate(
        results['Items'],
        headers="keys",
        tablefmt='fancy_grid'))
@admin.command()
@click.option('--function', help='function name', required=True)
def flush_pending(function):
    """Attempt to acquire any pending locks.
    """
    s = boto3.Session()
    client = s.client('lambda')
    # Synchronously invoke the lambda with a synthetic scheduled event --
    # the trigger it uses to retry pending lock acquisitions.
    results = client.invoke(
        FunctionName=function,
        Payload=json.dumps({'detail-type': 'Scheduled Event'})
    )
    # Print the invoke metadata and the decoded lambda response separately.
    content = results.pop('Payload').read()
    pprint.pprint(results)
    pprint.pprint(json.loads(content))
@admin.command()
def config_status():
    """ Check config status in an account.
    """
    s = boto3.Session()
    client = s.client('config')
    channels = client.describe_delivery_channel_status()[
        'DeliveryChannelsStatus']
    # Emit per-channel last-success times for snapshot, history and stream
    # delivery as YAML.
    for c in channels:
        print(yaml.safe_dump({
            c['name']: dict(
                snapshot=str(
                    c['configSnapshotDeliveryInfo'].get('lastSuccessfulTime')),
                history=str(
                    c['configHistoryDeliveryInfo'].get('lastSuccessfulTime')),
                stream=str(
                    c['configStreamDeliveryInfo'].get('lastStatusChangeTime'))
            ),
        }, default_flow_style=False))
@admin.command()
@click.option('--account-id', required=True)
@click.option('--region', required=True)
def delta(account_id, region):
    # Print the raw delta response for an account/region from the API.
    print(Client(BASE_URL).delta(account_id, region).text)
@admin.command()
@click.option('--reload/--no-reload', default=True)
@click.option('--port', default=8080)
def local(reload, port):
    """run local app server, assumes into the account
    """
    import logging
    from bottle import run
    from app import controller, app
    from c7n.resources import load_resources
    load_resources()
    print("Loaded resources definitions")
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('botocore').setLevel(logging.WARNING)
    # Create the DynamoDB table on first run.
    if controller.db.provision():
        print("Table Created")
    run(app, reloader=reload, port=port)
if __name__ == '__main__':
    # Direct script execution: dispatch into the click command group.
    admin()
|
ychen820/microblog | refs/heads/master | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/compat.py | 2942 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
    # Python 2: "string" means either a byte str or unicode.
    base_str = (str, unicode)
else:
    # Python 3: byte strings and text strings.
    base_str = (bytes, str)


def wrap_ord(a):
    """Return the integer code point of *a*.

    On Python 2 a one-character string is converted via ord(); on Python 3
    (where iterating bytes already yields ints) or for non-string input,
    *a* is returned unchanged.
    """
    needs_ord = sys.version_info < (3, 0) and isinstance(a, base_str)
    return ord(a) if needs_ord else a
|
chetan51/nupic | refs/heads/master | nupic/frameworks/opf/opfhelpers.py | 15 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This file contains utility functions that are may be imported
# by clients of the framework. Functions that are used only by
# the prediction framework should be in opfutils.py
#
# TODO: Rename as helpers.py once we're ready to replace the legacy
# helpers.py
import imp
import os
import expdescriptionapi
def loadExperiment(path):
  """Loads the experiment description file from the path.

  Args:
    path: The path to a directory containing a description.py file or the file
        itself.
  Returns:
    (config, control)
  """
  # Accept either the description.py file itself or its directory.
  experimentDir = path if os.path.isdir(path) else os.path.dirname(path)
  descriptionPyModule = loadExperimentDescriptionScriptFromDir(experimentDir)
  expIface = getExperimentDescriptionInterfaceFromModule(descriptionPyModule)
  return expIface.getModelDescription(), expIface.getModelControl()
def loadExperimentDescriptionScriptFromDir(experimentDir):
  """ Loads the experiment description python script from the given experiment
  directory.

  experimentDir: experiment directory path

  Returns: module of the loaded experiment description scripts
  """
  scriptPath = os.path.join(experimentDir, "description.py")
  return _loadDescriptionFile(scriptPath)
def getExperimentDescriptionInterfaceFromModule(module):
  """
  module: imported description.py module

  Returns: An expdescriptionapi.DescriptionIface-based instance that
  represents the experiment description
  """
  iface = module.descriptionInterface
  # Validate the declared interface type before handing it back.
  assert isinstance(iface, expdescriptionapi.DescriptionIface), \
      "expected DescriptionIface-based instance, but got %s" % type(iface)
  return iface
# Monotonically increasing counter that gives each dynamically loaded
# description.py a unique module name in _loadDescriptionFile().
g_descriptionImportCount = 0
def _loadDescriptionFile(descriptionPyPath):
  """Loads a description file and returns it as a module.

  descriptionPyPath: path of description.py file to load

  Raises RuntimeError if the file is missing, does not define
  descriptionInterface, or defines one of the wrong type.
  """
  global g_descriptionImportCount

  if not os.path.isfile(descriptionPyPath):
    raise RuntimeError(("Experiment description file %s does not exist or " + \
                        "is not a file") % (descriptionPyPath,))

  # Unique module name per load so repeated loads don't collide in
  # sys.modules.
  mod = imp.load_source("pf_description%d" % g_descriptionImportCount,
                        descriptionPyPath)
  g_descriptionImportCount += 1

  if not hasattr(mod, "descriptionInterface"):
    raise RuntimeError("Experiment description file %s does not define %s" % \
                       (descriptionPyPath, "descriptionInterface"))

  if not isinstance(mod.descriptionInterface, expdescriptionapi.DescriptionIface):
    # Fixed: the original interpolated an undefined variable `name` here,
    # so this path raised NameError instead of the intended RuntimeError.
    raise RuntimeError(("Experiment description file %s defines %s but it " + \
                        "is not DescriptionIface-based") % \
                       (descriptionPyPath, "descriptionInterface"))

  return mod
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.