| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
zasdfgbnm/tensorflow
|
refs/heads/master
|
tensorflow/contrib/training/python/training/resample_test.py
|
107
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy
from tensorflow.contrib.training.python.training import resample
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ResampleTest(test.TestCase):
"""Tests that resampling runs and outputs are close to expected values."""
def testRepeatRange(self):
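# `_repeat_range(counts)` emits each index i repeated counts[i] times,
# e.g. [0, 1, 2] -> [1, 2, 2], as the cases below illustrate.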
cases = [
([], []),
([0], []),
([1], [0]),
([1, 0], [0]),
([0, 1], [1]),
([3], [0, 0, 0]),
([0, 1, 2, 3], [1, 2, 2, 3, 3, 3]),
]
with self.test_session() as sess:
for inputs, expected in cases:
array_inputs = numpy.array(inputs, dtype=numpy.int32)
actual = sess.run(resample._repeat_range(array_inputs))
self.assertAllEqual(actual, expected)
def testRoundtrip(self, rate=0.25, count=5, n=500):
"""Tests `resample(x, weights)` and resample(resample(x, rate), 1/rate)`."""
foo = self.get_values(count)
bar = self.get_values(count)
weights = self.get_weights(count)
resampled_in, rates = resample.weighted_resample(
[foo, bar], constant_op.constant(weights), rate, seed=123)
resampled_back_out = resample.resample_at_rate(
resampled_in, 1.0 / rates, seed=456)
init = control_flow_ops.group(variables.local_variables_initializer(),
variables.global_variables_initializer())
with self.test_session() as s:
s.run(init) # initialize
# outputs
counts_resampled = collections.Counter()
counts_reresampled = collections.Counter()
for _ in range(n):
resampled_vs, reresampled_vs = s.run([resampled_in, resampled_back_out])
self.assertAllEqual(resampled_vs[0], resampled_vs[1])
self.assertAllEqual(reresampled_vs[0], reresampled_vs[1])
for v in resampled_vs[0]:
counts_resampled[v] += 1
for v in reresampled_vs[0]:
counts_reresampled[v] += 1
# assert that resampling worked as expected
self.assert_expected(weights, rate, counts_resampled, n)
# and that re-resampling approximates the identity.
self.assert_expected(
[1.0 for _ in weights],
1.0,
counts_reresampled,
n,
abs_delta=0.1 * n * count)
def testCorrectRates(self, rate=0.25, count=10, n=500, rtol=0.1):
"""Tests that the rates returned by weighted_resample are correct."""
# The approach here is to verify that:
# - sum(1/rate) approximates the size of the original collection
# - sum(1/rate * value) approximates the sum of the original inputs,
# - sum(1/rate * value)/sum(1/rate) approximates the mean.
vals = self.get_values(count)
weights = self.get_weights(count)
resampled, rates = resample.weighted_resample([vals],
constant_op.constant(weights),
rate)
invrates = 1.0 / rates
init = control_flow_ops.group(variables.local_variables_initializer(),
variables.global_variables_initializer())
expected_sum_op = math_ops.reduce_sum(vals)
with self.test_session() as s:
s.run(init)
expected_sum = n * s.run(expected_sum_op)
weight_sum = 0.0
weighted_value_sum = 0.0
for _ in range(n):
val, inv_rate = s.run([resampled[0], invrates])
weight_sum += sum(inv_rate)
weighted_value_sum += sum(val * inv_rate)
# sum(inv_rate) ~= n * count:
expected_count = count * n
self.assertAlmostEqual(
expected_count, weight_sum, delta=(rtol * expected_count))
# sum(vals) * n ~= weighted_sum(resampled, 1.0/rates)
self.assertAlmostEqual(
expected_sum, weighted_value_sum, delta=(rtol * expected_sum))
# Mean ~= weighted mean:
expected_mean = expected_sum / float(n * count)
self.assertAlmostEqual(
expected_mean,
weighted_value_sum / weight_sum,
delta=(rtol * expected_mean))
def testZeroRateUnknownShapes(self, count=10):
"""Tests that resampling runs with completely runtime shapes."""
# Use placeholcers without shape set:
vals = array_ops.placeholder(dtype=dtypes.int32)
rates = array_ops.placeholder(dtype=dtypes.float32)
resampled = resample.resample_at_rate([vals], rates)
with self.test_session() as s:
rs, = s.run(resampled, {
vals: list(range(count)),
rates: numpy.zeros(
shape=[count], dtype=numpy.float32)
})
self.assertEqual(rs.shape, (0,))
def testDtypes(self, count=10):
"""Test that we can define the ops with float64 weights."""
vals = self.get_values(count)
weights = math_ops.cast(self.get_weights(count), dtypes.float64)
# should not error:
resample.resample_at_rate([vals], weights)
resample.weighted_resample(
[vals], weights, overall_rate=math_ops.cast(1.0, dtypes.float64))
def get_weights(self, n, mean=10.0, stddev=5):
"""Returns random positive weight values."""
assert mean > 0, 'Weights have to be positive.'
results = []
while len(results) < n:
v = numpy.random.normal(mean, stddev)
if v > 0:
results.append(v)
return results
def get_values(self, n):
return constant_op.constant(list(range(n)))
def assert_expected(self,
weights,
overall_rate,
counts,
n,
tol=2.0,
abs_delta=0):
# Overall, we expect sum(counts) to be `overall_rate` * n *
# len(weights)... with a stddev on that expectation equivalent to
# performing (n * len(weights)) trials each with probability of
# overall_rate.
expected_overall_count = len(weights) * n * overall_rate
actual_overall_count = sum(counts.values())
stddev = math.sqrt(len(weights) * n * overall_rate * (1 - overall_rate))
self.assertAlmostEqual(
expected_overall_count,
actual_overall_count,
delta=(stddev * tol + abs_delta))
# And we can form a similar expectation for each item -- it should
# appear in the results a number of times proportional to its
# weight, which is similar to performing `expected_overall_count`
# trials each with a probability of weight/weight_sum.
weight_sum = sum(weights)
fractions = [w / weight_sum for w in weights]
expected_counts = [expected_overall_count * f for f in fractions]
stddevs = [
math.sqrt(expected_overall_count * f * (1 - f)) for f in fractions
]
for i in range(len(expected_counts)):
expected_count = expected_counts[i]
actual_count = counts[i]
self.assertAlmostEqual(
expected_count, actual_count, delta=(stddevs[i] * tol + abs_delta))
if __name__ == '__main__':
test.main()
|
CARocha/django-import-export
|
refs/heads/master
|
tests/settings.py
|
1
|
import os
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'import_export',
'core',
]
SITE_ID = 1
ROOT_URLCONF = "urls"
DEBUG = True
STATIC_URL = '/static/'
if os.environ.get('IMPORT_EXPORT_TEST_TYPE') == 'mysql-innodb':
IMPORT_EXPORT_USE_TRANSACTIONS = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'import_export_test',
'USER': os.environ.get('IMPORT_EXPORT_MYSQL_USER', 'root'),
'OPTIONS': {
'init_command': 'SET storage_engine=INNODB',
}
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'database.db'),
}
}
|
vpstudios/Codecademy-Exercise-Answers
|
refs/heads/master
|
Language Skills/Python/Unit 7/1-Lists and Functions/Using Lists of Lists in Functions/17-Using two lists as two arguments in a function.py
|
2
|
m = [1, 2, 3]
n = [4, 5, 6]
# Add your code here!
def join_lists(x, y):
return x + y
print join_lists(m, n)
# You want this to print [1, 2, 3, 4, 5, 6]
|
alshedivat/tensorflow
|
refs/heads/master
|
tensorflow/contrib/data/python/kernel_tests/assert_element_shape_test.py
|
8
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
class AssertElementShapeTest(test_base.DatasetTestBase):
def test_assert_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(expected_shapes, dataset.output_shapes)
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
def test_assert_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
iterator = (
dataset.apply(batching.assert_element_shape(wrong_shapes))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
def test_assert_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
partial_expected_shape = (
tensor_shape.TensorShape(None), # Unknown shape
tensor_shape.TensorShape((None, 4))) # Partial shape
result = dataset.apply(
batching.assert_element_shape(partial_expected_shape))
# Partial shapes are merged with actual shapes:
actual_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(actual_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
def test_assert_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
iterator = (
dataset.apply(batching.assert_element_shape(wrong_shapes))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
zero-db/zerodb
|
refs/heads/master
|
setup.py
|
2
|
import sys
import os
import platform
import subprocess
import errno
import tempfile
from distutils import ccompiler, log
from setuptools import setup, find_packages
version = '0.99.0b1'
INSTALL_REQUIRES = [
'BTrees',
'zope.component>=4.0.0',
'zodbpickle',
'ZODB>=5.0.0a6',
'ZEO>=5.0.0a2',
'zope.index>=4.0.0',
'zerodbext.catalog==0.8.4',
'cachetools',
'zc.zlibstorage>=1.1.0',
'mock',
'requests>=2.0',
'zope.event>=4.0.0',
'zope.lifecycleevent>=4.0.0',
'six>=1.7.0',
'scrypt'
]
TESTS_REQUIRE = [
'pytest',
'coverage',
'path.py',
'mock',
'wheel',
'pytest-cov',
'pdbpp',
'zope.testing'
]
entry_points = """
[console_scripts]
zerodb-initdb = zerodb.permissions.base:init_db_script
"""
# The following is to avoid build errors on brand new Amazon Ubuntu
# instances which may not have libffi-dev installed.
# Function copied from cffi 1.5.2
def _ask_pkg_config(resultlist, option, result_prefix='', sysroot=False):
pkg_config = os.environ.get('PKG_CONFIG', 'pkg-config')
try:
p = subprocess.Popen([pkg_config, option, 'libffi'],
stdout=subprocess.PIPE)
except OSError as e:
if e.errno not in [errno.ENOENT, errno.EACCES]:
raise
else:
t = p.stdout.read().decode().strip()
p.stdout.close()
if p.wait() == 0:
res = t.split()
res = [x[len(result_prefix):] for x in res
if x.startswith(result_prefix)]
sysroot = sysroot and os.environ.get('PKG_CONFIG_SYSROOT_DIR', '')
if sysroot:
# old versions of pkg-config don't support this env var,
# so here we emulate its effect if needed
res = [x if x.startswith(sysroot) else sysroot + x for x in res]
resultlist[:] = res
def can_build_cffi():
# Windows hopefully grabs binary wheels
if sys.platform == "win32":
return True
# Include dirs copied from cffi 1.5.2
include_dirs = ["/usr/include/ffi", "/usr/include/libffi"]
_ask_pkg_config(include_dirs, "--cflags-only-I", "-I", sysroot=True)
if "freebsd" in sys.platform:
include_dirs.append("/usr/local/include")
cc = ccompiler.new_compiler()
cc.include_dirs = [str(x) for x in include_dirs] # PY2
with tempfile.NamedTemporaryFile(mode="wt", suffix=".c") as f:
f.write('#include "ffi.h"\nvoid f(){}\n')
f.flush()
try:
cc.compile([f.name])
return True
except ccompiler.CompileError:
return False
# If we don't have ffi.h we fall back to pycryptodome.
# Note that the warning is only visible if pip is run with -v.
def have_pycrypto():
try:
import Crypto
return True
except ImportError:
return False
def have_pycryptodome():
try:
from Crypto.Cipher.AES import MODE_GCM
return True
except ImportError:
return False
def have_aesni():
if have_pycryptodome():
from Crypto.Cipher.AES import _raw_aesni_lib
return _raw_aesni_lib is not None
else:
try:
with open("/proc/cpuinfo", "r") as f:
info = f.read()
except IOError:
info = None
if (info is None) or ("aes" in info):
# If we have a platform w/o cpuinfo, assume we have AESNI
# Perhaps we should call sysctl on OS X
return True
else:
return False
def have_sodium_wheel():
return ((platform.system() == "Darwin") and
(platform.mac_ver()[0].startswith("10.10")))
if have_aesni():
if have_sodium_wheel() or can_build_cffi():
INSTALL_REQUIRES.append("aes256gcm-nacl")
if have_pycrypto() and not have_pycryptodome():
INSTALL_REQUIRES.append("pycrypto")
else:
INSTALL_REQUIRES.append("pycryptodome")
else:
INSTALL_REQUIRES.append("pycryptodome")
log.warn(
"WARNING: ffi.h not found: aes256gcm-nacl optimization disabled")
else:
INSTALL_REQUIRES.append("pycryptodome")
setup(
name="zerodb",
version=version,
description="End-to-end encrypted database",
author="ZeroDB Inc.",
author_email="michael@zerodb.io",
license="AGPLv3",
url="http://zerodb.io",
packages=find_packages(),
package_data={'zerodb.permissions': ['nobody-key.pem', 'nobody.pem']},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
extras_require={'testing': TESTS_REQUIRE},
entry_points=entry_points,
)
|
elipavlov/transport-nov-parser
|
refs/heads/master
|
src/tn_parser/transport/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
apavlo/h-store
|
refs/heads/master
|
third_party/python/boto/manage/propget.py
|
115
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def get(prop, choices=None):
prompt = prop.verbose_name
if not prompt:
prompt = prop.name
if choices:
if callable(choices):
choices = choices()
else:
choices = prop.get_choices()
valid = False
while not valid:
if choices:
min = 1
max = len(choices)
for i in range(min, max+1):
value = choices[i-1]
if isinstance(value, tuple):
value = value[0]
print '[%d] %s' % (i, value)
value = raw_input('%s [%d-%d]: ' % (prompt, min, max))
try:
int_value = int(value)
value = choices[int_value-1]
if isinstance(value, tuple):
value = value[1]
valid = True
except ValueError:
print '%s is not a valid choice' % value
except IndexError:
print '%s is not within the range [%d-%d]' % (value, min, max)
else:
value = raw_input('%s: ' % prompt)
try:
value = prop.validate(value)
if prop.empty(value) and prop.required:
print 'A value is required'
else:
valid = True
except:
print 'Invalid value: %s' % value
return value
|
miracle2k/stgit
|
refs/heads/master
|
stgit/commands/rebase.py
|
1
|
__copyright__ = """
Copyright (C) 2005, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os
from stgit.argparse import opt
from stgit.commands.common import *
from stgit.utils import *
from stgit import argparse, stack, git
help = 'Move the stack base to another point in history'
kind = 'stack'
usage = ['[options] <new-base-id>']
description = """
Pop all patches from current stack, move the stack base to the given
<new-base-id> and push the patches back.
If you experience merge conflicts, resolve the problem and continue
the rebase by executing the following sequence:
$ git add --update
$ stg refresh
$ stg goto top-patch
Or if you want to skip that patch:
$ stg undo --hard
$ stg push next-patch..top-patch"""
args = [argparse.commit]
options = [
opt('-n', '--nopush', action = 'store_true',
short = 'Do not push the patches back after rebasing'),
opt('-m', '--merged', action = 'store_true',
short = 'Check for patches merged upstream')]
directory = DirectoryGotoToplevel(log = True)
def func(parser, options, args):
"""Rebase the current stack
"""
if len(args) != 1:
parser.error('incorrect number of arguments')
if crt_series.get_protected():
raise CmdException, 'This branch is protected. Rebase is not permitted'
check_local_changes()
check_conflicts()
check_head_top_equal(crt_series)
# ensure an exception is raised before popping on non-existent target
if git_id(crt_series, args[0]) is None:
raise GitException, 'Unknown revision: %s' % args[0]
applied = prepare_rebase(crt_series)
rebase(crt_series, args[0])
post_rebase(crt_series, applied, options.nopush, options.merged)
print_crt_patch(crt_series)
|
40223211/logo-toys
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/__init__.py
|
900
|
"""
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1)
def testMultiply(self):
self.assertEqual((0 * 10), 0)
self.assertEqual((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/library/unittest.html
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', 'installHandler',
'registerResult', 'removeResult', 'removeHandler']
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
__unittest = True
from .result import TestResult
from .case import (TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure)
from .suite import BaseTestSuite, TestSuite
from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases)
from .main import TestProgram, main
from .runner import TextTestRunner, TextTestResult
from .signals import installHandler, registerResult, removeResult, removeHandler
# deprecated
_TextTestResult = TextTestResult
|
unclehook/py3amulecmd
|
refs/heads/master
|
tests/packet_tests.py
|
1
|
from ec.packet import *
import types
def test_ECPacketData():
assert ECPacketData((0x02,[(0x02, 3000),(0x01, 5)])) == '\x02\x02\x04\x03\x02\x0b\xb8\x02\x02\x01\x05'
def test_ECPacket():
assert ECPacket((0x02, [(0x02, 'test')])) == '\x00\x00\x00\x22\x00\x00\x00\x0A\x02\x01\x04\x06\x05test\x00'
def test_ECLoginPacket():
assert ECLoginPacket("amule-remote", "0x0001", "aaa") == '\x00\x00\x00\x22\x00\x00\x00\x36\x02\x04\xc8\x80\x06\x0d\x61\x6d\x75\x6c\x65\x2d\x72\x65\x6d\x6f\x74\x65\x00\xc8\x82\x06\x07\x30\x78\x30\x30\x30\x31\x00\x04\x03\x02\x02\x00\x02\x09\x10\x47\xbc\xe5\xc7\x4f\x58\x9f\x48\x67\xdb\xd5\x7e\x9c\xa9\xf8\x08'
def test_ReadPacketData():
test_data = '\x02\x04\xc8\x80\x06\x0d\x61\x6d\x75\x6c\x65\x2d\x72\x65\x6d\x6f\x74\x65\x00\xc8\x82\x06\x07\x30\x78\x30\x30\x30\x31\x00\x04\x03\x02\x02\x00\x02\x09\x10\x47\xbc\xe5\xc7\x4f\x58\x9f\x48\x67\xdb\xd5\x7e\x9c\xa9\xf8\x08'
assert ReadPacketData(test_data) == (0x02, [(0x0100, "amule-remote"), (0x0101, "0x0001"), (0x0002, 0x0200), (0x0001, 'G\xbc\xe5\xc7OX\x9fHg\xdb\xd5~\x9c\xa9\xf8\x08')])
|
CEG-FYP-OpenStack/scheduler
|
refs/heads/master
|
nova/api/openstack/compute/legacy_v2/contrib/extended_rescue_with_image.py
|
100
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions as exts
class Extended_rescue_with_image(exts.ExtensionDescriptor):
"""Allow the user to specify the image to use for rescue."""
name = "ExtendedRescueWithImage"
alias = "os-extended-rescue-with-image"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended_rescue_with_image/api/v2")
updated = "2014-01-04T00:00:00Z"
|
peterm-itr/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor_task/tests/factories.py
|
124
|
import json
import factory
from factory.django import DjangoModelFactory
from student.tests.factories import UserFactory as StudentUserFactory
from instructor_task.models import InstructorTask
from celery.states import PENDING
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class InstructorTaskFactory(DjangoModelFactory):
FACTORY_FOR = InstructorTask
task_type = 'rescore_problem'
course_id = SlashSeparatedCourseKey("MITx", "999", "Robot_Super_Course")
task_input = json.dumps({})
task_key = None
task_id = None
task_state = PENDING
task_output = None
requester = factory.SubFactory(StudentUserFactory)
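# A minimal usage sketch (hypothetical field values; assumes a Django test
# database and the factory_boy create() API):
#
#   task = InstructorTaskFactory.create(task_key='key1', task_id='id-1')
#   assert task.task_state == PENDING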
|
sbi-aau/beeng-py
|
refs/heads/master
|
beeng/engine.py
|
1
|
# -*- coding: utf-8 -*-
"""
This module handles the calls to the calculation engine
"""
# ---------------------------------------------------------------------------- #
# Include
# ---------------------------------------------------------------------------- #
import sys
import os
import ctypes
import logging
import platform
import beeng.finder
# ---------------------------------------------------------------------------- #
# Constants
# ---------------------------------------------------------------------------- #
TEXT_CODEC = 'latin-1'
# ---------------------------------------------------------------------------- #
# Class
# ---------------------------------------------------------------------------- #
class Engine:
def __init__(self, be_eng_dll_path=None, buffer_size=1000000, uk_flag=0, check_for_valid_license=True):
# type: (str, int, int, bool) -> None
"""
Constructor
:param be_eng_dll_path: The path to the be calculation engine dll (If set to None we try to automatically find it)
:param buffer_size: The size for the result buffer string (If smaller than the actual result, data will be lost!)
:param uk_flag: Controls whether the results are in English (1) or Danish (0)
:param check_for_valid_license: If this is set to true we check if a valid license is installed when loading the engine
"""
# Fields
# (os.path.abspath fails on None, so only resolve when a path is given)
self.be_eng_dll_path = os.path.abspath(be_eng_dll_path) if be_eng_dll_path else None
self.buffer_size = buffer_size
self.uk_flag = uk_flag
self.check_for_valid_license = check_for_valid_license
self.old_path = os.environ['PATH']
self.engine = None
# Asserts
assert self.uk_flag in (0, 1)
# Find location if be_eng_dll_path is none
if be_eng_dll_path is None:
logging.debug('No path specified for the engine DLL. Trying to find the newest engine.')
self.be_eng_dll_path = beeng.finder.find_engine_location()
logging.debug('Found engine at: {}'.format(self.be_eng_dll_path))
assert self.be_eng_dll_path is not None, 'The calculation engine could not automatically be found'
# Load engine
self.load_engine(self.be_eng_dll_path)
def load_engine(self, path):
# type: (str) -> None
"""
This function loads the calculation engine
"""
# Check interpreter
self.check_interpreter_bits()
# Save old paths
self.old_path = os.environ['PATH']
# Get directory
dir_name, file_name = os.path.split(path)
# Change path (Used so we can load the dll from a directory different from cwd)
os.environ['PATH'] = dir_name + ';' + os.environ['PATH']
# Load engine
self.engine = ctypes.windll.LoadLibrary(file_name)
# Check license if valid
if self.check_for_valid_license:
try:
is_valid = self.is_license_valid()
if not is_valid:
raise Exception('Missing a valid license for the DLL')
except AttributeError as Ex:
logging.debug(Ex)
logging.warning('It is not possible to check if the license is valid, so check_for_valid_license will be set to false (engine too old)')
self.check_for_valid_license = False
def unload_engine(self):
"""
This function unloads the calculation engine
"""
os.environ['PATH'] = self.old_path
del self.engine
self.engine = None
def check_interpreter_bits(self):
"""
This function ensures that the python interpreter is running in 32 bit mode
"""
if platform.architecture()[0] != '32bit':
raise Exception('This package is only compatible with 32 bit python!')
@staticmethod
def load_model(model_path):
# type: (str) -> None
"""
Opens an XML model file and stores it in a C-compatible string buffer (passed to the engine calls)
:param model_path: Path to model file
:return: Project XML C string buffer
"""
with open(model_path, 'r') as f:
p_model = f.read()
if sys.version_info < (3, 0):
# python2
return ctypes.create_string_buffer(p_model)
else:
# python3
return ctypes.create_string_buffer(p_model.encode(TEXT_CODEC))
def get_key_xml(self, model_string_buffer):
# type: (str) -> (bool, str)
"""
Gets the key results
:param model_string_buffer: The model returned by the load_model function
:return: success status (False on failure), decoded XML result string
"""
# Prepare arguments
mem = ctypes.create_string_buffer(self.buffer_size)
status = ctypes.c_int(ctypes.sizeof(mem))
uk = ctypes.c_int(self.uk_flag)
# Call function
res_code = self.engine.Be06Keys(model_string_buffer, mem, ctypes.byref(status), uk)
# Decode result
key_xml = mem.raw.decode(TEXT_CODEC).strip('\0\n')
# Return result
return res_code != 0, key_xml
def get_res_xml(self, model_string_buffer):
# type: (str) -> (bool, str)
"""
Gets the full results
:param model_string_buffer: The model returned by the load_model function
:return: success status (False on failure), decoded XML result string
"""
# Prepare arguments
mem = ctypes.create_string_buffer(self.buffer_size)
status = ctypes.c_int(ctypes.sizeof(mem))
uk = ctypes.c_int(self.uk_flag)
# Call function
res_code = self.engine.Be06Res(model_string_buffer, mem, ctypes.byref(status), uk)
# Decode result
key_xml = mem.raw.decode(TEXT_CODEC).strip('\0\n')
# Return result
return res_code != 0, key_xml
def get_summer_comfort(self, project_path):
# type: (str) -> (bool, str)
"""
This function returns the summer comfort temperatures for each hour of every day of the year
"""
# Prepare arguments
if sys.version_info < (3, 0):
# python2
path = ctypes.create_string_buffer(project_path)
else:
# python3
path = ctypes.create_string_buffer(project_path.encode(TEXT_CODEC))
# Construct path to target file
target_path = os.path.splitext(project_path)[0] + '_tmp.xml'
# Call function
res_code = self.engine.Be06Temp(path)
# Handle results
if res_code == 0:
# Check if the resulting file exists
if os.path.exists(target_path):
# Read the file
with open(target_path, 'r') as file:
data = file.read()
# Clean up
os.remove(target_path)
# Return result
return True, data
else:
logging.error('Results file was not found. Something went wrong in the calculation engine.')
return False, ''
else:
# Handle error cases
if os.path.exists(target_path):
os.remove(target_path)
if res_code == 2:
logging.error('The building is non-residential')
elif res_code == 3:
logging.error('There was a problem with the climate data')
else:
logging.error('Unknown error. Return code: {}'.format(res_code))
return False, ''
def is_license_valid(self):
# type: () -> bool
"""
This function checks whether a valid license is found.
Supported from version 8.17.1.17 onward.
:return: true if license is valid
"""
# Call function
res_code = self.engine.IsLicenseValid()
# Return result
return res_code == 0
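# A minimal usage sketch (hypothetical paths; assumes a 32-bit Python on
# Windows with a Be06 engine DLL available):
#
#   eng = Engine(be_eng_dll_path=r'C:\Be18\Be06eng.dll', uk_flag=1)
#   model = Engine.load_model(r'C:\projects\house.xml')
#   ok, key_xml = eng.get_key_xml(model)
#   if ok:
#       print(key_xml)
#   eng.unload_engine()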
|
quchunguang/test
|
refs/heads/master
|
testpy3/testdbm.py
|
1
|
import dbm
# Open database, creating it if necessary.
with dbm.open('foo.db', 'c') as db:
# Record some values
db[b'hello'] = b'there'
db['www.python.org'] = 'Python Website'
db['www.cnn.com'] = 'Cable News Network'
# Note that the keys are considered bytes now.
assert db[b'www.python.org'] == b'Python Website'
# Notice how the value is now in bytes.
assert db['www.cnn.com'] == b'Cable News Network'
# Often-used methods of the dict interface work too.
print(db.get('python.org', b'not present'))
# Storing a non-string key or value will raise an exception (most
# likely a TypeError).
# db['www.yahoo.com'] = 4
# db is automatically closed when leaving the with statement.
|
da1z/intellij-community
|
refs/heads/master
|
python/testData/resolve/PercentKeyWordArgs.py
|
38
|
"This is my favourite number%(<ref>kwg)d!" % {'kwg': 4181}
|
da1z/intellij-community
|
refs/heads/master
|
python/testData/refactoring/unwrap/whileUnwrap_after.py
|
273
|
x = 1
|
bohlian/erpnext
|
refs/heads/develop
|
erpnext/setup/doctype/company/company_dashboard.py
|
13
|
from frappe import _
def get_data():
return {
'heatmap': True,
'heatmap_message': _('This is based on transactions against this Company. See timeline below for details'),
'graph': True,
'graph_method': "frappe.utils.goal.get_monthly_goal_graph_data",
'graph_method_args': {
'title': _('Sales'),
'goal_value_field': 'monthly_sales_target',
'goal_total_field': 'total_monthly_sales',
'goal_history_field': 'sales_monthly_history',
'goal_doctype': 'Sales Invoice',
'goal_doctype_link': 'company',
'goal_field': 'base_grand_total',
'date_field': 'posting_date',
'filter_str': 'status != "Draft"',
'aggregation': 'sum'
},
'fieldname': 'company',
'transactions': [
{
'label': _('Pre Sales'),
'items': ['Quotation']
},
{
'label': _('Orders'),
'items': ['Sales Order', 'Delivery Note', 'Sales Invoice']
},
{
'label': _('Support'),
'items': ['Issue']
},
{
'label': _('Projects'),
'items': ['Project']
}
]
}
|
hpfem/agros2d
|
refs/heads/master
|
resources/python/pylint/checkers/exceptions.py
|
4
|
# Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""exceptions handling (raising, catching, exceptions classes) checker
"""
import sys
from logilab.common.compat import builtins
BUILTINS_NAME = builtins.__name__
from logilab import astng
from logilab.astng import YES, Instance, unpack_infer
from pylint.checkers import BaseChecker
from pylint.checkers.utils import is_empty, is_raising
from pylint.interfaces import IASTNGChecker
OVERGENERAL_EXCEPTIONS = ('Exception',)
MSGS = {
'E0701': ('Bad except clauses order (%s)',
'bad-except-order',
'Used when except clauses are not in the correct order (from the '
'more specific to the more generic). If you don\'t fix the order, '
'some exceptions may not be caught by the most specific handler.'),
'E0702': ('Raising %s while only classes, instances or string are allowed',
'raising-bad-type',
'Used when something which is neither a class, an instance or a \
string is raised (i.e. a `TypeError` will be raised).'),
'E0710': ('Raising a new style class which doesn\'t inherit from BaseException',
'raising-non-exception',
'Used when a new style class which doesn\'t inherit from \
BaseException is raised.'),
'E0711': ('NotImplemented raised - should raise NotImplementedError',
'notimplemented-raised',
'Used when NotImplemented is raised instead of \
NotImplementedError'),
'W0701': ('Raising a string exception',
'raising-string',
'Used when a string exception is raised.'),
'W0702': ('No exception type(s) specified',
'bare-except',
'Used when an except clause doesn\'t specify the exception \
type(s) to catch.'),
'W0703': ('Catching too general exception %s',
'broad-except',
'Used when an except catches a too general exception, \
possibly burying unrelated errors.'),
'W0704': ('Except doesn\'t do anything',
'pointless-except',
'Used when an except clause does nothing but "pass" and there is\
no "else" clause.'),
'W0710': ('Exception doesn\'t inherit from standard "Exception" class',
'nonstandard-exception',
'Used when a custom exception class is raised but doesn\'t \
inherit from the builtin "Exception" class.'),
'W0711': ('Exception to catch is the result of a binary "%s" operation',
'binary-op-exception',
'Used when the exception to catch is of the form \
"except A or B:". If intending to catch multiple, \
rewrite as "except (A, B):"'),
}
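# For example, ``except ValueError or TypeError:`` catches only ValueError,
# because ``ValueError or TypeError`` evaluates to its first (truthy)
# operand; the intended form is ``except (ValueError, TypeError):``.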
if sys.version_info < (3, 0):
EXCEPTIONS_MODULE = "exceptions"
else:
EXCEPTIONS_MODULE = "builtins"
class ExceptionsChecker(BaseChecker):
"""checks for
* excepts without exception filter
* type of raise argument : string, Exceptions, other values
"""
__implements__ = IASTNGChecker
name = 'exceptions'
msgs = MSGS
priority = -4
options = (('overgeneral-exceptions',
{'default' : OVERGENERAL_EXCEPTIONS,
'type' :'csv', 'metavar' : '<comma-separated class names>',
'help' : 'Exceptions that will emit a warning '
'when being caught. Defaults to "%s"' % (
', '.join(OVERGENERAL_EXCEPTIONS),)}
),
)
def visit_raise(self, node):
"""visit raise possibly inferring value"""
# ignore empty raise
if node.exc is None:
return
expr = node.exc
if self._check_raise_value(node, expr):
return
else:
try:
value = unpack_infer(expr).next()
except astng.InferenceError:
return
self._check_raise_value(node, value)
def _check_raise_value(self, node, expr):
"""check for bad values, string exception and class inheritance
"""
value_found = True
if isinstance(expr, astng.Const):
value = expr.value
if isinstance(value, str):
self.add_message('W0701', node=node)
else:
self.add_message('E0702', node=node,
args=value.__class__.__name__)
elif (isinstance(expr, astng.Name) and \
expr.name in ('None', 'True', 'False')) or \
isinstance(expr, (astng.List, astng.Dict, astng.Tuple,
astng.Module, astng.Function)):
self.add_message('E0702', node=node, args=expr.name)
elif ( (isinstance(expr, astng.Name) and expr.name == 'NotImplemented')
or (isinstance(expr, astng.CallFunc) and
isinstance(expr.func, astng.Name) and
expr.func.name == 'NotImplemented') ):
self.add_message('E0711', node=node)
elif isinstance(expr, astng.BinOp) and expr.op == '%':
self.add_message('W0701', node=node)
elif isinstance(expr, (Instance, astng.Class)):
if isinstance(expr, Instance):
expr = expr._proxied
if (isinstance(expr, astng.Class) and
not inherit_from_std_ex(expr) and
expr.root().name != BUILTINS_NAME):
if expr.newstyle:
self.add_message('E0710', node=node)
else:
self.add_message('W0710', node=node)
else:
value_found = False
else:
value_found = False
return value_found
def visit_tryexcept(self, node):
"""check for empty except"""
exceptions_classes = []
nb_handlers = len(node.handlers)
for index, handler in enumerate(node.handlers):
# single except doing nothing but "pass" without else clause
if nb_handlers == 1 and is_empty(handler.body) and not node.orelse:
self.add_message('W0704', node=handler.type or handler.body[0])
if handler.type is None:
if nb_handlers == 1 and not is_raising(handler.body):
self.add_message('W0702', node=handler)
# check if a "except:" is followed by some other
# except
elif index < (nb_handlers - 1):
msg = 'empty except clause should always appear last'
self.add_message('E0701', node=node, args=msg)
elif isinstance(handler.type, astng.BoolOp):
self.add_message('W0711', node=handler, args=handler.type.op)
else:
try:
excs = list(unpack_infer(handler.type))
except astng.InferenceError:
continue
for exc in excs:
# XXX skip other non class nodes
if exc is YES or not isinstance(exc, astng.Class):
continue
exc_ancestors = [anc for anc in exc.ancestors()
if isinstance(anc, astng.Class)]
for previous_exc in exceptions_classes:
if previous_exc in exc_ancestors:
msg = '%s is an ancestor class of %s' % (
previous_exc.name, exc.name)
self.add_message('E0701', node=handler.type, args=msg)
if (exc.name in self.config.overgeneral_exceptions
and exc.root().name == EXCEPTIONS_MODULE
and nb_handlers == 1 and not is_raising(handler.body)):
self.add_message('W0703', args=exc.name, node=handler.type)
exceptions_classes += excs
def inherit_from_std_ex(node):
"""return true if the given class node is subclass of
exceptions.Exception
"""
if node.name in ('Exception', 'BaseException') \
and node.root().name == EXCEPTIONS_MODULE:
return True
for parent in node.ancestors(recurs=False):
if inherit_from_std_ex(parent):
return True
return False
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(ExceptionsChecker(linter))
|
proversity-org/edx-platform
|
refs/heads/master
|
common/djangoapps/rocket_chat/urls.py
|
1
|
from django.conf.urls import include, url
from rocket_chat import views
urlpatterns = [
url(r'', views.rocket_chat_discussion, name='rocket_chat_discussion'),
]
|
dex4er/django
|
refs/heads/1.6.x
|
django/contrib/formtools/wizard/storage/base.py
|
216
|
from django.core.files.uploadedfile import UploadedFile
from django.utils.datastructures import MultiValueDict
from django.utils.functional import lazy_property
from django.utils import six
from django.contrib.formtools.wizard.storage.exceptions import NoFileStorageConfigured
class BaseStorage(object):
step_key = 'step'
step_data_key = 'step_data'
step_files_key = 'step_files'
extra_data_key = 'extra_data'
def __init__(self, prefix, request=None, file_storage=None):
self.prefix = 'wizard_%s' % prefix
self.request = request
self.file_storage = file_storage
def init_data(self):
self.data = {
self.step_key: None,
self.step_data_key: {},
self.step_files_key: {},
self.extra_data_key: {},
}
def reset(self):
self.init_data()
def _get_current_step(self):
return self.data[self.step_key]
def _set_current_step(self, step):
self.data[self.step_key] = step
current_step = lazy_property(_get_current_step, _set_current_step)
def _get_extra_data(self):
return self.data[self.extra_data_key]
def _set_extra_data(self, extra_data):
self.data[self.extra_data_key] = extra_data
extra_data = lazy_property(_get_extra_data, _set_extra_data)
def get_step_data(self, step):
# When reading the serialized data, upconvert it to a MultiValueDict,
# some serializers (json) don't preserve the type of the object.
values = self.data[self.step_data_key].get(step, None)
if values is not None:
values = MultiValueDict(values)
return values
def set_step_data(self, step, cleaned_data):
# If the value is a MultiValueDict, convert it to a regular dict of the
# underlying contents. Some serializers call the public API on it (as
# opposed to the underlying dict methods), in which case the content
# can be truncated (__getitem__ returns only the first item).
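# For example, MultiValueDict({'choices': ['a', 'b']}).lists() yields
# [('choices', ['a', 'b'])], so dict(cleaned_data.lists()) preserves
# every submitted value.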
if isinstance(cleaned_data, MultiValueDict):
cleaned_data = dict(cleaned_data.lists())
self.data[self.step_data_key][step] = cleaned_data
@property
def current_step_data(self):
return self.get_step_data(self.current_step)
def get_step_files(self, step):
wizard_files = self.data[self.step_files_key].get(step, {})
if wizard_files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
files = {}
for field, field_dict in six.iteritems(wizard_files):
field_dict = field_dict.copy()
tmp_name = field_dict.pop('tmp_name')
files[field] = UploadedFile(
file=self.file_storage.open(tmp_name), **field_dict)
return files or None
def set_step_files(self, step, files):
if files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
if step not in self.data[self.step_files_key]:
self.data[self.step_files_key][step] = {}
for field, field_file in six.iteritems(files or {}):
tmp_filename = self.file_storage.save(field_file.name, field_file)
file_dict = {
'tmp_name': tmp_filename,
'name': field_file.name,
'content_type': field_file.content_type,
'size': field_file.size,
'charset': field_file.charset
}
self.data[self.step_files_key][step][field] = file_dict
@property
def current_step_files(self):
return self.get_step_files(self.current_step)
def update_response(self, response):
pass
|
alandmoore/django_auth_ldap3
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import find_packages, setup
from django_auth_ldap3 import __version__ as version
setup(
name='django_auth_ldap3',
version=version,
license='BSD',
author='Sam Kingston',
author_email='sam@sjkwi.com.au',
description='A library for connecting Django\'s authentication system to an LDAP directory',
url='https://github.com/sjkingo/django_auth_ldap3',
install_requires=[
'Django >= 1.6.10',
'ldap3 >= 0.9.7.1',
],
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP',
],
)
|
freecoder-zrw/leetcode
|
refs/heads/master
|
partition_list.py
|
1
|
#coding:utf8
from data_struct import ListNode
class Solution:
# @param {ListNode} head
# @param {integer} x
# @return {ListNode}
def partition(self, head, x):
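# Stable partition: thread the nodes into two chains (values < x and
# values >= x) preserving original order, then splice the chains.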
less_head, less_tail = None, None
gt_head, gt_tail = None, None
node = head
while node:
if node.val < x:
if not less_head:
less_head = node
if less_tail:
less_tail.next = node
less_tail = node
else:
if not gt_head:
gt_head = node
if gt_tail:
gt_tail.next = node
gt_tail = node
node = node.next
if less_tail:
less_tail.next = gt_head
if gt_tail:
gt_tail.next = None
return less_head if less_head else gt_head
def test_partition_list():
head = ListNode.array2list([1,4,3,2,5,2])
head.trace()
s = Solution()
ret = s.partition(None, 0)
if ret:
ret.trace()
if __name__ == '__main__':
test_partition_list()
|
atbaker/docker-django
|
refs/heads/master
|
django-example/config/production.py
|
4
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use SendGrid to send emails
- Use MemCachier on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: fix this so the import is not attempted when running in Dev.
pass
from .common import Common
class Production(Common):
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
# END INSTALLED_APPS
# SECRET KEY
SECRET_KEY = values.SecretValue()
# END SECRET KEY
# django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
# end django-secure
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ("collectfast", )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIREY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIREY, AWS_EXPIREY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# END STORAGE CONFIGURATION
# EMAIL
DEFAULT_FROM_EMAIL = values.Value('django_example <noreply@example.com>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[django_example] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# END EMAIL
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# END TEMPLATE CONFIGURATION
# CACHING
# Only do this here because thanks to django-pylibmc-sasl and pylibmc
# memcacheify is painful to install on windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
# END CACHING
# Your production stuff: below this line define 3rd party library settings
|
miguelparaiso/PracticaOdoo
|
refs/heads/master
|
openerp/addons/test_impex/__init__.py
|
2148
|
import models
|
amitkaps/full-stack-data-science
|
refs/heads/master
|
credit-risk-deploy/webapp/forms.py
|
1
|
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, IntegerField
from wtforms.validators import Required, Email
class LoanForm(FlaskForm):
amount = IntegerField(
'Loan Amount',
validators=[Required()])
duration = IntegerField(
'Loan Duration in Years',
validators=[Required()])
name = StringField(
'Your Name',
validators=[Required()])
email = StringField(
'Email address',
validators=[Required(), Email()])
age = IntegerField(
'Your Age',
validators=[Required()])
income = IntegerField(
'Annual Income',
validators=[Required()])
ownership = SelectField(
'Home Ownership',
choices = [
('RENT', 'Rent'),
('MORTGAGE', 'Mortgage'),
('OWN', 'Own'),
])
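# A minimal usage sketch (hypothetical route and template; assumes a Flask
# app configured with a SECRET_KEY for CSRF):
#
#   from flask import Flask, render_template
#   app = Flask(__name__)
#   app.config['SECRET_KEY'] = 'dev'
#
#   @app.route('/apply', methods=['GET', 'POST'])
#   def apply_loan():
#       form = LoanForm()
#       if form.validate_on_submit():
#           return 'Loan of %d requested by %s' % (form.amount.data, form.name.data)
#       return render_template('loan.html', form=form)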
|
odoousers2014/odoo
|
refs/heads/master
|
addons/mail/wizard/email_template_preview.py
|
7
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class email_template_preview(osv.osv_memory):
_inherit = "mail.template"
_name = "email_template.preview"
_description = "Email Template Preview"
def _get_records(self, cr, uid, context=None):
"""
        Return the records of the given email template's model.
"""
if context is None:
context = {}
template_id = context.get('template_id', False)
if not template_id:
return []
email_template = self.pool.get('mail.template')
template = email_template.browse(cr, uid, int(template_id), context=context)
template_object = template.model_id
model = self.pool[template_object.model]
record_ids = model.search(cr, uid, [], 0, 10, 'id', context=context)
default_id = context.get('default_res_id')
if default_id and default_id not in record_ids:
record_ids.insert(0, default_id)
return model.name_get(cr, uid, record_ids, context)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
result = super(email_template_preview, self).default_get(cr, uid, fields, context=context)
email_template = self.pool.get('mail.template')
template_id = context.get('template_id')
if 'res_id' in fields and not result.get('res_id'):
records = self._get_records(cr, uid, context=context)
            result['res_id'] = records and records[0][0] or False  # select the first record as the default
if template_id and 'model_id' in fields and not result.get('model_id'):
result['model_id'] = email_template.read(cr, uid, int(template_id), ['model_id'], context).get('model_id', False)
return result
_columns = {
'res_id': fields.selection(_get_records, 'Sample Document'),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
}
def on_change_res_id(self, cr, uid, ids, res_id, context=None):
if context is None:
context = {'value': {}}
if not res_id or not context.get('template_id'):
return {'value': {}}
email_template = self.pool.get('mail.template')
template_id = context.get('template_id')
template = email_template.browse(cr, uid, template_id, context=context)
# generate and get template values
mail_values = email_template.generate_email(cr, uid, template_id, res_id, context=context)
vals = dict((field, mail_values.get(field, False)) for field in ('email_from', 'email_to', 'email_cc', 'reply_to', 'subject', 'body_html', 'partner_to', 'partner_ids', 'attachment_ids'))
vals['name'] = template.name
return {'value': vals}
|
NeuralEnsemble/neuroConstruct
|
refs/heads/master
|
lib/jython/Lib/plistlib.py
|
141
|
r"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The PropertyList (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in strings, you can use readPlistFromString()
and writePlistToString().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries, Data or datetime.datetime objects. String values (including
dictionary keys) may be unicode strings -- they will be written out as
UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python string.
Generate Plist example:
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat=0.1,
anInt=728,
aDict=dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData=Data("<binary gunk>"),
someMoreData=Data("<lots of binary gunk>" * 10),
aDate=datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# unicode keys are possible, but a little awkward to use:
pl[u'\xc5benraa'] = "That was a unicode key."
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print pl["aKey"]
"""
__all__ = [
"readPlist", "writePlist", "readPlistFromString", "writePlistToString",
"readPlistFromResource", "writePlistToResource",
"Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
import datetime
from cStringIO import StringIO
import re
import warnings
def readPlist(pathOrFile):
"""Read a .plist file. 'pathOrFile' may either be a file name or a
(readable) file object. Return the unpacked root object (which
usually is a dictionary).
"""
didOpen = 0
if isinstance(pathOrFile, (str, unicode)):
pathOrFile = open(pathOrFile)
didOpen = 1
p = PlistParser()
rootObject = p.parse(pathOrFile)
if didOpen:
pathOrFile.close()
return rootObject
def writePlist(rootObject, pathOrFile):
"""Write 'rootObject' to a .plist file. 'pathOrFile' may either be a
file name or a (writable) file object.
"""
didOpen = 0
if isinstance(pathOrFile, (str, unicode)):
pathOrFile = open(pathOrFile, "w")
didOpen = 1
writer = PlistWriter(pathOrFile)
writer.writeln("<plist version=\"1.0\">")
writer.writeValue(rootObject)
writer.writeln("</plist>")
if didOpen:
pathOrFile.close()
def readPlistFromString(data):
"""Read a plist data from a string. Return the root object.
"""
return readPlist(StringIO(data))
def writePlistToString(rootObject):
"""Return 'rootObject' as a plist-formatted string.
"""
f = StringIO()
writePlist(rootObject, f)
return f.getvalue()
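# Illustrative round trip for the two string helpers above: writing a root
# object out and reading it back yields an equal structure.
#
# >>> pl = {"aKey": "value", "count": 3}
# >>> readPlistFromString(writePlistToString(pl)) == pl
# True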
def readPlistFromResource(path, restype='plst', resid=0):
"""Read plst resource from the resource fork of path.
"""
warnings.warnpy3k("In 3.x, readPlistFromResource is removed.",
stacklevel=2)
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdPerm
from Carbon import Res
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
Res.UseResFile(resNum)
plistData = Res.Get1Resource(restype, resid).data
Res.CloseResFile(resNum)
return readPlistFromString(plistData)
def writePlistToResource(rootObject, path, restype='plst', resid=0):
"""Write 'rootObject' as a plst resource to the resource fork of path.
"""
warnings.warnpy3k("In 3.x, writePlistToResource is removed.", stacklevel=2)
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdWrPerm
from Carbon import Res
plistData = writePlistToString(rootObject)
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdWrPerm)
Res.UseResFile(resNum)
try:
Res.Get1Resource(restype, resid).RemoveResource()
except Res.Error:
pass
res = Res.Resource(plistData)
res.AddResource(restype, resid, '')
res.WriteResource()
Res.CloseResFile(resNum)
class DumbXMLWriter:
def __init__(self, file, indentLevel=0, indent="\t"):
self.file = file
self.stack = []
self.indentLevel = indentLevel
self.indent = indent
def beginElement(self, element):
self.stack.append(element)
self.writeln("<%s>" % element)
self.indentLevel += 1
def endElement(self, element):
assert self.indentLevel > 0
assert self.stack.pop() == element
self.indentLevel -= 1
self.writeln("</%s>" % element)
def simpleElement(self, element, value=None):
if value is not None:
value = _escapeAndEncode(value)
self.writeln("<%s>%s</%s>" % (element, value, element))
else:
self.writeln("<%s/>" % element)
def writeln(self, line):
if line:
self.file.write(self.indentLevel * self.indent + line + "\n")
else:
self.file.write("\n")
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z")
def _dateFromString(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime.datetime(*lst)
def _dateToString(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
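# Illustrative round trip for the date helpers above:
#   _dateFromString("2004-10-26T10:33:33Z")
#       -> datetime.datetime(2004, 10, 26, 10, 33, 33)
#   _dateToString(datetime.datetime(2004, 10, 26, 10, 33, 33))
#       -> '2004-10-26T10:33:33Z'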
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text.encode("utf-8") # encode as UTF-8
PLISTHEADER = """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
class PlistWriter(DumbXMLWriter):
def __init__(self, file, indentLevel=0, indent="\t", writeHeader=1):
if writeHeader:
file.write(PLISTHEADER)
DumbXMLWriter.__init__(self, file, indentLevel, indent)
def writeValue(self, value):
if isinstance(value, (str, unicode)):
self.simpleElement("string", value)
elif isinstance(value, bool):
# must switch for bool before int, as bool is a
# subclass of int...
if value:
self.simpleElement("true")
else:
self.simpleElement("false")
elif isinstance(value, (int, long)):
self.simpleElement("integer", "%d" % value)
elif isinstance(value, float):
self.simpleElement("real", repr(value))
elif isinstance(value, dict):
self.writeDict(value)
elif isinstance(value, Data):
self.writeData(value)
elif isinstance(value, datetime.datetime):
self.simpleElement("date", _dateToString(value))
elif isinstance(value, (tuple, list)):
self.writeArray(value)
else:
raise TypeError("unsuported type: %s" % type(value))
def writeData(self, data):
self.beginElement("data")
self.indentLevel -= 1
maxlinelength = 76 - len(self.indent.replace("\t", " " * 8) *
self.indentLevel)
for line in data.asBase64(maxlinelength).split("\n"):
if line:
self.writeln(line)
self.indentLevel += 1
self.endElement("data")
def writeDict(self, d):
self.beginElement("dict")
items = d.items()
items.sort()
for key, value in items:
if not isinstance(key, (str, unicode)):
raise TypeError("keys must be strings")
self.simpleElement("key", key)
self.writeValue(value)
self.endElement("dict")
def writeArray(self, array):
self.beginElement("array")
for value in array:
self.writeValue(value)
self.endElement("array")
class _InternalDict(dict):
# This class is needed while Dict is scheduled for deprecation:
# we only need to warn when a *user* instantiates Dict or when
# the "attribute notation for dict keys" is used.
def __getattr__(self, attr):
try:
value = self[attr]
except KeyError:
raise AttributeError, attr
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning, 2)
return value
def __setattr__(self, attr, value):
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning, 2)
self[attr] = value
def __delattr__(self, attr):
try:
del self[attr]
except KeyError:
raise AttributeError, attr
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning, 2)
class Dict(_InternalDict):
def __init__(self, **kwargs):
from warnings import warn
warn("The plistlib.Dict class is deprecated, use builtin dict instead",
PendingDeprecationWarning, 2)
super(Dict, self).__init__(**kwargs)
class Plist(_InternalDict):
"""This class has been deprecated. Use readPlist() and writePlist()
functions instead, together with regular dict objects.
"""
def __init__(self, **kwargs):
from warnings import warn
warn("The Plist class is deprecated, use the readPlist() and "
"writePlist() functions instead", PendingDeprecationWarning, 2)
super(Plist, self).__init__(**kwargs)
def fromFile(cls, pathOrFile):
"""Deprecated. Use the readPlist() function instead."""
rootObject = readPlist(pathOrFile)
plist = cls()
plist.update(rootObject)
return plist
fromFile = classmethod(fromFile)
def write(self, pathOrFile):
"""Deprecated. Use the writePlist() function instead."""
writePlist(self, pathOrFile)
def _encodeBase64(s, maxlinelength=76):
# copied from base64.encodestring(), with added maxlinelength argument
maxbinsize = (maxlinelength//4)*3
pieces = []
for i in range(0, len(s), maxbinsize):
chunk = s[i : i + maxbinsize]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
class Data:
"""Wrapper for binary data."""
def __init__(self, data):
self.data = data
def fromBase64(cls, data):
# base64.decodestring just calls binascii.a2b_base64;
# it seems overkill to use both base64 and binascii.
return cls(binascii.a2b_base64(data))
fromBase64 = classmethod(fromBase64)
def asBase64(self, maxlinelength=76):
return _encodeBase64(self.data, maxlinelength)
def __cmp__(self, other):
if isinstance(other, self.__class__):
return cmp(self.data, other.data)
elif isinstance(other, str):
return cmp(self.data, other)
else:
return cmp(id(self), id(other))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
def __init__(self):
self.stack = []
self.currentKey = None
self.root = None
def parse(self, fileobj):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self.handleBeginElement
parser.EndElementHandler = self.handleEndElement
parser.CharacterDataHandler = self.handleData
parser.ParseFile(fileobj)
return self.root
def handleBeginElement(self, element, attrs):
self.data = []
handler = getattr(self, "begin_" + element, None)
if handler is not None:
handler(attrs)
def handleEndElement(self, element):
handler = getattr(self, "end_" + element, None)
if handler is not None:
handler()
def handleData(self, data):
self.data.append(data)
def addObject(self, value):
if self.currentKey is not None:
self.stack[-1][self.currentKey] = value
self.currentKey = None
elif not self.stack:
# this is the root object
self.root = value
else:
self.stack[-1].append(value)
def getData(self):
data = "".join(self.data)
try:
data = data.encode("ascii")
except UnicodeError:
pass
self.data = []
return data
# element handlers
def begin_dict(self, attrs):
d = _InternalDict()
self.addObject(d)
self.stack.append(d)
def end_dict(self):
self.stack.pop()
def end_key(self):
self.currentKey = self.getData()
def begin_array(self, attrs):
a = []
self.addObject(a)
self.stack.append(a)
def end_array(self):
self.stack.pop()
def end_true(self):
self.addObject(True)
def end_false(self):
self.addObject(False)
def end_integer(self):
self.addObject(int(self.getData()))
def end_real(self):
self.addObject(float(self.getData()))
def end_string(self):
self.addObject(self.getData())
def end_data(self):
self.addObject(Data.fromBase64(self.getData()))
def end_date(self):
self.addObject(_dateFromString(self.getData()))
|
HackLinux/goblin-core
|
refs/heads/master
|
llvm/3.4.2/llvm-3.4.2.src/bindings/python/llvm/tests/test_disassembler.py
|
9
|
from .base import TestBase
from ..disassembler import Disassembler, Option_UseMarkup
class TestDisassembler(TestBase):
def test_instantiate(self):
Disassembler('i686-apple-darwin9')
def test_basic(self):
sequence = '\x67\xe3\x81' # jcxz -127
triple = 'i686-apple-darwin9'
disassembler = Disassembler(triple)
count, s = disassembler.get_instruction(sequence)
self.assertEqual(count, 3)
self.assertEqual(s, '\tjcxz\t-127')
    def test_nonexistent_triple(self):
        with self.assertRaisesRegexp(Exception, "Could not obtain disassembler for triple"):
            Disassembler("nonexistent-triple-raises")
def test_get_instructions(self):
sequence = '\x67\xe3\x81\x01\xc7' # jcxz -127; addl %eax, %edi
disassembler = Disassembler('i686-apple-darwin9')
instructions = list(disassembler.get_instructions(sequence))
self.assertEqual(len(instructions), 2)
self.assertEqual(instructions[0], (0, 3, '\tjcxz\t-127'))
self.assertEqual(instructions[1], (3, 2, '\taddl\t%eax, %edi'))
def test_set_options(self):
sequence = '\x10\x40\x2d\xe9'
triple = 'arm-linux-android'
disassembler = Disassembler(triple)
disassembler.set_options(Option_UseMarkup)
count, s = disassembler.get_instruction(sequence)
print s
self.assertEqual(count, 4)
self.assertEqual(s, '\tpush\t{<reg:r4>, <reg:lr>}')
|
adsznzhang/learntosolveit
|
refs/heads/version1
|
languages/python/networking_twisted_parallel1.py
|
7
|
from twisted.internet import defer, task
from twisted.python import log
from twisted.internet import reactor
from twisted.web import client
from twisted.internet.utils import getProcessValue
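# The helper below implements a common Twisted fan-out idiom: 'work' is a
# single generator of Deferreds shared by 'count' coiterate() calls, so at
# most 'count' downloads are in flight while the Cooperator keeps pulling
# the remaining URLs from the shared generator.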
def parallel(iterable, count, callable, *args, **named):
print args, named
coop = task.Cooperator()
work = (callable(elem, *args, **named) for elem in iterable)
return defer.DeferredList([coop.coiterate(work) for i in xrange(count)])
def download((url, fileName)):
return client.downloadPage(url, file(fileName, 'wb'))
urls = [(url, str(n)) for (n, url) in enumerate(file('urls.txt'))]
finished = parallel(urls, 50, download)
finished.addErrback(log.err)
finished.addCallback(lambda ign: reactor.stop())
reactor.run()
|
ThreatCentral/blackberries
|
refs/heads/master
|
configure.py
|
1
|
#!/usr/bin/env python
# (c) Copyright [2016] Hewlett Packard Enterprise Development LP Licensed under
# the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable
# law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
__author__ = 'Bart Otten'
__copyright__ = '(c) Copyright [2016] Hewlett Packard Enterprise Development LP'
__credits__ = []
__license__ = 'Apache 2.0'
__version__ = '1'
__maintainer__ = 'Bart Otten'
__email__ = 'tc-support@hpe.com'
__status__ = 'Development'
from sys import exit
from os import name, makedirs
from os.path import join, expanduser, isdir, isfile
import argparse
class CreateProfile:
# Use the default directory
working_dir = join(expanduser('~'), '.canari')
package = 'ThreatCentral'
def __init__(self):
pass
def run_canari_profile():
print "Creating configuration file for Maltego."
try:
from canari.commands.create_profile import create_profile
except ImportError:
print "Failed creating configuration file for Maltego!"
print "Please run : canari create-profile ThreatCentral"
exit("IF this fails, please reinstall the Canari framework.")
else:
create_profile(CreateProfile())
def insert_account_details():
print "Loading canari.easygui ..."
try:
from canari.easygui import multpasswordbox
except ImportError:
print "Failed loading canari.easygui, trying to read from commandline"
try:
from getpass import getpass
except ImportError:
print "Failed loading getpass module, please add your Threat Central account details to {}".format(
join(expanduser("~"), '.canari', 'ThreatCentral.conf'))
else:
print "Please enter your Threat Central account details"
print ""
return raw_input("Username: "), getpass()
else:
return multpasswordbox(msg='Please enter your username and password for Threat Central.',
title='Threat Central', fields=('username', 'password'))
def save_apikey():
try:
from ThreatCentral.transforms.common.client import generate_apikey, set_to_config
except ImportError as e:
print "Failed loading ThreatCentral module. Please reinstall ThreatCentral."
exit(e)
else:
acc_details = insert_account_details()
if acc_details:
api_key = generate_apikey(acc_details[0], acc_details[1])
return set_to_config(option='apikey', value=api_key[0])
def check_config_file():
try:
from ThreatCentral.transforms.common.client import (get_from_config, set_to_config, canari_conf_path)
except ImportError as e:
print "Failed loading modules {}".format(e)
else:
print "Checking default {} ...".format(canari_conf_path)
if not isfile(join(expanduser("~"), '.canari', 'canari.conf')):
print "{} not found, creating file...".format(canari_conf_path)
try:
open(join(expanduser("~"), '.canari', 'canari.conf'), 'w').write('[default]')
if not set_to_config(option='configs', value='ThreatCentral.conf', section='default',
path=canari_conf_path):
exit("Failed to set the default values in {} !".format(canari_conf_path))
if not set_to_config(option='path', value='${PATH}', section='default',
path=canari_conf_path):
exit("Failed to set the default values in {} !".format(canari_conf_path))
except IOError:
exit("Failed creating {}".format(canari_conf_path))
d = get_from_config(option='configs', section='default', path=canari_conf_path)
if not d and d != '':
if not set_to_config(option='configs', value='ThreatCentral.conf', section='default',
path=canari_conf_path):
exit("Failed to set the default values in {} !".format(canari_conf_path))
d = get_from_config(option='path', section='default', path=canari_conf_path)
if not d and d != '':
if not set_to_config(option='path', value='${PATH}', section='default',
path=canari_conf_path):
exit("Failed to set the default values in {} !".format(canari_conf_path))
d = get_from_config(option='packages', section='remote', path=canari_conf_path)
if not d and d != '':
print "Default canari file is missing some default values, trying to add these ..."
if not set_to_config(option='packages', value='', section='remote', path=canari_conf_path):
exit("Failed to set the default values in {} !".format(canari_conf_path))
else:
print "{} OK".format(canari_conf_path)
else:
print "{} OK".format(canari_conf_path)
def check_user_rights():
try:
from platform import platform
except ImportError:
quit('Cannot load platform module')
else:
        if platform() and platform()[:7].lower() != 'windows':
try:
from os import geteuid, getlogin, setgid, setuid
except ImportError:
                quit('Cannot load geteuid, getlogin, setgid or setuid, quitting!')
else:
if name == 'posix' and not geteuid():
login = getlogin()
if login != 'root':
print "Bringing down user rights"
try:
import pwd
except ImportError as e:
exit("Failed loading the pwd module!")
else:
user = pwd.getpwnam(login)
setgid(user.pw_gid)
setuid(user.pw_uid)
def init():
# Threat Central now accepts API Keys, removing keyring
print "Checking canari configuration ..."
try:
print "Loading Modules..."
# from ThreatCentral.transforms.common.client import (check_config, get_from_config, set_to_config,
# canari_conf_path, set_to_keyring)
from ThreatCentral.transforms.common.client import (check_config, get_from_config, set_to_config,
canari_conf_path)
except ImportError:
exit("Failed loading some ThreadCentral modules. Please reinstall ThreatCentral.")
except KeyboardInterrupt:
exit("Quiting")
else:
if isdir(join(expanduser("~"), '.canari')):
print "canari folder OK"
check_config_file()
# run_canari_profile()
else:
print "{} folder not found, creating...".format(join(expanduser("~"), '.canari'))
try:
makedirs(join(expanduser("~"), '.canari'))
except IOError as e:
print "Print failed creating {} : {}".format(join(expanduser("~"), '.canari'), e)
else:
check_config_file()
# run_canari_profile()
def interactive():
q = raw_input("Do you want to initialize the Canari configuration files? y/N ").lower().strip()
if q == 'y':
init()
q = raw_input("Do you want to set up the API key? y/N ").lower().strip()
if q == 'y':
if save_apikey():
print 'API KEY saved to {}'.format(join(expanduser("~"), '.canari', 'ThreatCentral.conf'))
else:
print 'Failed saving API KEY to {}'.format(join(expanduser("~"), '.canari', 'ThreatCentral.conf'))
q = raw_input("Do you want to create the config file for Maltego? y/N ").lower().strip()
if q == 'y':
run_canari_profile()
if __name__ == '__main__':
# Bring down user rights
check_user_rights()
parser = argparse.ArgumentParser(description="Configures Maltego Threat Central package")
parser.add_argument('--init', action="store_true", help="Initializes configuration files")
parser.add_argument('--apikey', action="store_true", help="Registers apikey and saves to configuration file")
parser.add_argument('--configure', action="store_true", help="Runs Canari create-profile to create the Maltego "
"configuration file")
parser.add_argument('--interactive', action='store_true', help="Interactive mode")
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
if args.init:
init()
elif args.apikey:
if save_apikey():
print 'API KEY saved to {}'.format(join(expanduser("~"), '.canari', 'ThreatCentral.conf'))
else:
print 'Failed saving API KEY to {}'.format(join(expanduser("~"), '.canari', 'ThreatCentral.conf'))
elif args.configure:
run_canari_profile()
else:
        # run interactive mode to make it easier.
interactive()
|
miltonruelas/cursotecnico
|
refs/heads/7.0
|
branch/base_workflow/__init__.py
|
8
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import workflow
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kajgan/e2
|
refs/heads/master
|
lib/python/Plugins/SystemPlugins/SoftwareManager/PluginBackup.py
|
4
|
from enigma import *
from Screens.Screen import Screen
from Components.Button import Button
from Components.Label import Label
from Components.ActionMap import ActionMap
from Screens.Console import Console
class PluginBackup(Screen):
skin = """
<screen position="center,center" size="560,400" title="Plugin Backup">
<ePixmap position="0,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap position="140,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<widget name="key_red" position="0,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_green" position="140,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="info-pluginbackup" position="10,30" zPosition="1" size="550,100" font="Regular;20" halign="left" valign="top" transparent="1" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self.session = session
self["key_green"] = Button("Backup Plugins")
self["key_red"] = Button("Exit")
self["info-pluginbackup"] = Label(_("Installed Plugins will be written to:\n/etc/enigma2/installed-list.txt\nMake sure you include that file into settings backup."))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"green": self.green,
"red": self.quit,
"cancel": self.quit,
}, -2)
def quit(self):
self.close()
def green(self):
self.session.open(Console, title = "Plugin Backup", cmdlist = ["sh '/usr/lib/enigma2/python/Plugins/SystemPlugins/SoftwareManager/PluginBackup.sh'"])
|
interDist/pasportaservo
|
refs/heads/_master
|
pasportaservo/wsgi.py
|
4
|
"""
WSGI config for pasportaservo project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "pasportaservo.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pasportaservo.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application # noqa: E402
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
testmana2/test
|
refs/heads/master
|
Utilities/__init__.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2003 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Package implementing various functions/classes needed everywhere within eric6.
"""
from __future__ import unicode_literals
try:
str = unicode
import locale
import urllib
def quote(url):
"""
Replacement for the urllib.quote function because of unicode problems.
@param url text to quote (string)
@return quoted url (string)
"""
return urllib.quote(url.encode('utf-8'))
except NameError:
basestring = str
from urllib.parse import quote # __IGNORE_WARNING__
import os
import sys
import codecs
import re
import fnmatch
import glob
import getpass
def __showwarning(message, category, filename, lineno, file=None, line=""):
"""
Module function to raise a SyntaxError for a SyntaxWarning.
@param message warning object
@param category type object of the warning
@param filename name of the file causing the warning (string)
@param lineno line number causing the warning (integer)
@param file file to write the warning message to (ignored)
@param line line causing the warning (ignored)
@raise SyntaxError
"""
if category is SyntaxWarning:
err = SyntaxError(str(message))
err.filename = filename
err.lineno = lineno
raise err
import warnings
warnings.showwarning = __showwarning
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF32
from PyQt5.QtCore import QRegExp, QDir, QProcess, Qt, QByteArray, \
qVersion, PYQT_VERSION_STR, QCoreApplication, QCryptographicHash
from PyQt5.Qsci import QSCINTILLA_VERSION_STR, QsciScintilla
# import these methods into the Utilities namespace
from Globals import ( # __IGNORE_WARNING__
isWindowsPlatform, isLinuxPlatform, isMacPlatform, getConfigDir,
setConfigDir, getPythonModulesDirectory, getPyQt5ModulesDirectory,
getQtBinariesPath)
from E5Gui.E5Application import e5App
from UI.Info import Program, Version
import Preferences
from Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheck import ( # __IGNORE_WARNING__
normalizeCode)
from eric6config import getConfig
configDir = None
codingBytes_regexps = [
(2, re.compile(br'''coding[:=]\s*([-\w_.]+)''')),
(1, re.compile(br'''<\?xml.*\bencoding\s*=\s*['"]([-\w_.]+)['"]\?>''')),
]
coding_regexps = [
(2, re.compile(r'''coding[:=]\s*([-\w_.]+)''')),
(1, re.compile(r'''<\?xml.*\bencoding\s*=\s*['"]([-\w_.]+)['"]\?>''')),
]
supportedCodecs = [
'utf-8',
'iso8859-1', 'iso8859-15', 'iso8859-2', 'iso8859-3',
'iso8859-4', 'iso8859-5', 'iso8859-6', 'iso8859-7',
'iso8859-8', 'iso8859-9', 'iso8859-10', 'iso8859-11',
'iso8859-13', 'iso8859-14', 'iso8859-16', 'latin-1',
'koi8-r', 'koi8-u',
'utf-16', 'utf-32',
'cp037', 'cp424', 'cp437', 'cp500', 'cp737', 'cp775',
'cp850', 'cp852', 'cp855', 'cp856', 'cp857', 'cp860',
'cp861', 'cp862', 'cp863', 'cp864', 'cp865', 'cp866',
'cp869', 'cp874', 'cp875', 'cp932', 'cp949', 'cp950',
'cp1006', 'cp1026', 'cp1140', 'cp1250', 'cp1251',
'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256',
'cp1257', 'cp1258',
'gb2312', 'gb18030',
'ascii'
]
class CodingError(Exception):
"""
Class implementing an exception, which is raised, if a given coding is
incorrect.
"""
def __init__(self, coding):
"""
Constructor
@param coding coding to include in the message (string)
"""
self.errorMessage = QCoreApplication.translate(
"CodingError",
"The coding '{0}' is wrong for the given text.").format(coding)
def __repr__(self):
"""
Special method returning a representation of the exception.
@return string representing the error message
"""
return str(self.errorMessage)
def __str__(self):
"""
Special method returning a string representation of the exception.
@return string representing the error message
"""
return str(self.errorMessage)
def get_codingBytes(text):
"""
Function to get the coding of a bytes text.
@param text bytes text to inspect (bytes)
@return coding string
"""
lines = text.splitlines()
for coding in codingBytes_regexps:
coding_re = coding[1]
head = lines[:coding[0]]
for l in head:
m = coding_re.search(l)
if m:
return str(m.group(1), "ascii").lower()
return None
def get_coding(text):
"""
Function to get the coding of a text.
@param text text to inspect (string)
@return coding string
"""
lines = text.splitlines()
for coding in coding_regexps:
coding_re = coding[1]
head = lines[:coding[0]]
for l in head:
m = coding_re.search(l)
if m:
return m.group(1).lower()
return None
def readEncodedFile(filename):
"""
Function to read a file and decode its contents into proper text.
@param filename name of the file to read (string)
@return tuple of decoded text and encoding (string, string)
"""
f = open(filename, "rb")
text = f.read()
f.close()
return decode(text)
def readEncodedFileWithHash(filename):
"""
Function to read a file, calculate a hash value and decode its contents
into proper text.
@param filename name of the file to read (string)
@return tuple of decoded text, encoding and hash value (string, string,
string)
"""
f = open(filename, "rb")
text = f.read()
f.close()
hash = str(QCryptographicHash.hash(
QByteArray(text), QCryptographicHash.Md5).toHex(),
encoding="ASCII")
return decode(text) + (hash, )
def decode(text):
"""
Function to decode some byte text into a string.
@param text byte text to decode (bytes)
@return tuple of decoded text and encoding (string, string)
"""
try:
if text.startswith(BOM_UTF8):
# UTF-8 with BOM
return str(text[len(BOM_UTF8):], 'utf-8'), 'utf-8-bom'
elif text.startswith(BOM_UTF16):
# UTF-16 with BOM
return str(text[len(BOM_UTF16):], 'utf-16'), 'utf-16'
elif text.startswith(BOM_UTF32):
# UTF-32 with BOM
return str(text[len(BOM_UTF32):], 'utf-32'), 'utf-32'
coding = get_codingBytes(text)
if coding:
return str(text, coding), coding
except (UnicodeError, LookupError):
pass
# Assume UTF-8
try:
return str(text, 'utf-8'), 'utf-8-guessed'
except (UnicodeError, LookupError):
pass
guess = None
if Preferences.getEditor("AdvancedEncodingDetection"):
# Try the universal character encoding detector
try:
import ThirdParty.CharDet.chardet
guess = ThirdParty.CharDet.chardet.detect(text)
if guess and guess['confidence'] > 0.95 and \
guess['encoding'] is not None:
codec = guess['encoding'].lower()
return str(text, codec), '{0}-guessed'.format(codec)
except (UnicodeError, LookupError):
pass
except ImportError:
pass
# Try default encoding
try:
codec = Preferences.getEditor("DefaultEncoding")
return str(text, codec), '{0}-default'.format(codec)
except (UnicodeError, LookupError):
pass
if Preferences.getEditor("AdvancedEncodingDetection"):
        # Use the guessed one even if confidence level is low
if guess and guess['encoding'] is not None:
try:
codec = guess['encoding'].lower()
return str(text, codec), '{0}-guessed'.format(codec)
except (UnicodeError, LookupError):
pass
    # Assume UTF-8, losing information
return str(text, "utf-8", "ignore"), 'utf-8-ignore'
def readEncodedFileWithEncoding(filename, encoding):
"""
Function to read a file and decode its contents into proper text.
@param filename name of the file to read (string)
@keyparam encoding encoding to be used to read the file (string)
@return tuple of decoded text and encoding (string, string)
"""
f = open(filename, "rb")
text = f.read()
f.close()
if encoding:
try:
return str(text, encoding), '{0}-selected'.format(encoding)
except (UnicodeError, LookupError):
pass
# Try default encoding
try:
codec = Preferences.getEditor("DefaultEncoding")
return str(text, codec), '{0}-default'.format(codec)
except (UnicodeError, LookupError):
pass
        # Assume UTF-8, losing information
return str(text, "utf-8", "ignore"), 'utf-8-ignore'
else:
return decode(text)
def writeEncodedFile(filename, text, orig_coding):
"""
Function to write a file with properly encoded text.
@param filename name of the file to read (string)
@param text text to be written (string)
@param orig_coding type of the original encoding (string)
@return encoding used for writing the file (string)
"""
etext, encoding = encode(text, orig_coding)
f = open(filename, "wb")
f.write(etext)
f.close()
return encoding
def encode(text, orig_coding):
"""
Function to encode text into a byte text.
@param text text to be encoded (string)
@param orig_coding type of the original encoding (string)
@return tuple of encoded text and encoding used (bytes, string)
@exception CodingError raised to indicate an invalid encoding
"""
encoding = None
if orig_coding == 'utf-8-bom':
etext, encoding = BOM_UTF8 + text.encode("utf-8"), 'utf-8-bom'
else:
# Try declared coding spec
coding = get_coding(text)
if coding:
try:
etext, encoding = text.encode(coding), coding
except (UnicodeError, LookupError):
# Error: Declared encoding is incorrect
raise CodingError(coding)
else:
if orig_coding and orig_coding.endswith(
('-selected', '-default', '-guessed', '-ignore')):
coding = orig_coding\
.replace("-selected", "")\
.replace("-default", "")\
.replace("-guessed", "")\
.replace("-ignore", "")
try:
etext, encoding = text.encode(coding), coding
except (UnicodeError, LookupError):
pass
if encoding is None:
# Try configured default
try:
codec = Preferences.getEditor("DefaultEncoding")
etext, encoding = text.encode(codec), codec
except (UnicodeError, LookupError):
pass
if encoding is None:
# Try saving as ASCII
try:
etext, encoding = text.encode('ascii'), 'ascii'
except UnicodeError:
pass
if encoding is None:
# Save as UTF-8 without BOM
etext, encoding = text.encode('utf-8'), 'utf-8'
return etext, encoding
def decodeString(text):
"""
Function to decode a string containing Unicode encoded characters.
@param text text containing encoded chars (string)
@return decoded text (string)
"""
buf = b""
index = 0
while index < len(text):
if text[index] == "\\":
qb = QByteArray.fromHex(text[index:index + 4])
buf += bytes(qb)
index += 4
else:
buf += codecs.encode(text[index], "utf-8")
index += 1
buf = buf.replace(b"\x00", b"")
return decodeBytes(buf)
def decodeBytes(buffer):
"""
Function to decode some byte text into a string.
@param buffer byte buffer to decode (bytes)
@return decoded text (string)
"""
# try UTF with BOM
try:
if buffer.startswith(BOM_UTF8):
# UTF-8 with BOM
return str(buffer[len(BOM_UTF8):], encoding='utf-8')
elif buffer.startswith(BOM_UTF16):
# UTF-16 with BOM
return str(buffer[len(BOM_UTF16):], encoding='utf-16')
elif buffer.startswith(BOM_UTF32):
# UTF-32 with BOM
return str(buffer[len(BOM_UTF32):], encoding='utf-32')
except (UnicodeError, LookupError):
pass
# try UTF-8
try:
return str(buffer, encoding="utf-8")
except UnicodeError:
pass
# try codec detection
try:
import ThirdParty.CharDet.chardet
guess = ThirdParty.CharDet.chardet.detect(buffer)
if guess and guess['encoding'] is not None:
codec = guess['encoding'].lower()
return str(buffer, encoding=codec)
except (UnicodeError, LookupError):
pass
except ImportError:
pass
return str(buffer, encoding="utf-8", errors="ignore")
def readStringFromStream(stream):
"""
Module function to read a string from the given stream.
@param stream data stream opened for reading (QDataStream)
@return string read from the stream (string)
"""
data = stream.readString()
if data is None:
data = b""
return data.decode('utf-8')
_escape = re.compile("[&<>\"'\u0080-\uffff]")
_escape_map = {
"&": "&",
"<": "<",
">": ">",
'"': """,
"'": "'",
}
def escape_entities(m, map=_escape_map):
"""
Function to encode html entities.
@param m the match object
@param map the map of entities to encode
@return the converted text (string)
"""
char = m.group()
text = map.get(char)
if text is None:
text = "&#{0:d};".format(ord(char))
return text
def html_encode(text, pattern=_escape):
"""
Function to correctly encode a text for html.
@param text text to be encoded (string)
@param pattern search pattern for text to be encoded (string)
@return the encoded text (string)
"""
if not text:
return ""
text = pattern.sub(escape_entities, text)
return text
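# Illustrative: html_encode('<a href="x">\xe4</a>') returns
# '&lt;a href=&quot;x&quot;&gt;&#228;&lt;/a&gt;' (U+00E4 is 228 decimal).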
_uescape = re.compile('[\u0080-\uffff]')
def escape_uentities(m):
"""
Function to encode html entities.
@param m the match object
@return the converted text (string)
"""
char = m.group()
text = "&#{0:d};".format(ord(char))
return text
def html_uencode(text, pattern=_uescape):
"""
Function to correctly encode a unicode text for html.
@param text text to be encoded (string)
@param pattern search pattern for text to be encoded (string)
@return the encoded text (string)
"""
if not text:
return ""
text = pattern.sub(escape_uentities, text)
return text
_uunescape = re.compile(r'&#\d+;')
def unescape_uentities(m):
"""
Function to decode html entities.
@param m the match object
@return the converted text (string)
"""
char = m.group()
ord = int(char[2:-1])
return chr(ord)
def html_udecode(text, pattern=_uunescape):
"""
Function to correctly decode a html text to a unicode text.
@param text text to be decoded (string)
@param pattern search pattern for text to be decoded (string)
@return the decoded text (string)
"""
if not text:
return ""
text = pattern.sub(unescape_uentities, text)
return text
def convertLineEnds(text, eol):
"""
Function to convert the end of line characters.
@param text text to be converted (string)
@param eol new eol setting (string)
@return text with converted eols (string)
"""
if eol == '\r\n':
regexp = re.compile(r"""(\r(?!\n)|(?<!\r)\n)""")
return regexp.sub(lambda m, eol='\r\n': eol, text)
elif eol == '\n':
regexp = re.compile(r"""(\r\n|\r)""")
return regexp.sub(lambda m, eol='\n': eol, text)
elif eol == '\r':
regexp = re.compile(r"""(\r\n|\n)""")
return regexp.sub(lambda m, eol='\r': eol, text)
else:
return text
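# Illustrative: convertLineEnds("a\r\nb\rc\n", "\n") -> "a\nb\nc\n"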
def linesep():
"""
Function to return the lineseparator used by the editor.
@return line separator used by the editor (string)
"""
eolMode = Preferences.getEditor("EOLMode")
if eolMode == QsciScintilla.EolUnix:
return "\n"
elif eolMode == QsciScintilla.EolMac:
return "\r"
else:
return "\r\n"
def extractFlags(text):
"""
Function to extract eric specific flags out of the given text.
Flags are contained in comments and are introduced by 'eflag:'.
The rest of the line is interpreted as 'key = value'. value is
analyzed for being an integer or float value. If that fails, it
is assumed to be a string. If a key does not contain a '='
    character, it is assumed to be a boolean flag. Flags are expected
    at the very end of a file. The search ends as soon as a line
    without the 'eflag:' marker is found.
@param text text to be scanned (string)
@return dictionary of string, boolean, complex, float and int
"""
flags = {}
if isinstance(text, basestring):
lines = text.rstrip().splitlines()
else:
lines = text
for line in reversed(lines):
try:
index = line.index("eflag:")
except ValueError:
# no flag found, don't look any further
break
flag = line[index + 6:].strip()
if "=" in flag:
key, value = flag.split("=", 1)
key = key.strip()
value = value.strip()
if value.lower() in ["true", "false", "yes", "no", "ok"]:
# it is a flag
flags[key] = value.lower() in ["true", "yes", "ok"]
continue
try:
# interpret as int first
value = int(value)
except ValueError:
try:
# interpret as float next
value = float(value)
except ValueError:
pass
flags[key] = value
else:
# treat it as a boolean
if flag[0] == "-":
# false flags start with '-'
flags[flag[1:]] = False
else:
flags[flag] = True
return flags
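# Illustrative trailer for extractFlags(): a file ending in
#     # eflag: noqa = E402
#     # eflag: -FileType
# yields {'noqa': 'E402', 'FileType': False}.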
def extractFlagsFromFile(filename):
"""
Function to extract eric specific flags out of the given file.
@param filename name of the file to be scanned (string)
@return dictionary of string, boolean, complex, float and int
"""
try:
source, encoding = readEncodedFile(filename)
except (UnicodeError, IOError):
return {}
return extractFlags(source)
def extractLineFlags(line, startComment="#", endComment=""):
"""
Function to extract flags starting and ending with '__' from a line
comment.
@param line line to extract flags from (string)
@keyparam startComment string identifying the start of the comment (string)
@keyparam endComment string identifying the end of a comment (string)
@return list containing the extracted flags (list of strings)
"""
flags = []
pos = line.rfind(startComment)
if pos >= 0:
comment = line[pos + len(startComment):].strip()
if endComment:
comment = comment.replace("endComment", "")
flags = [f.strip() for f in comment.split()
if (f.startswith("__") and f.endswith("__"))]
return flags
def toNativeSeparators(path):
"""
Function returning a path, that is using native separator characters.
@param path path to be converted (string)
@return path with converted separator characters (string)
"""
return QDir.toNativeSeparators(path)
def fromNativeSeparators(path):
"""
Function returning a path, that is using "/" separator characters.
@param path path to be converted (string)
@return path with converted separator characters (string)
"""
return QDir.fromNativeSeparators(path)
def normcasepath(path):
"""
Function returning a path, that is normalized with respect to its case
and references.
@param path file path (string)
@return case normalized path (string)
"""
return os.path.normcase(os.path.normpath(path))
def normabspath(path):
"""
Function returning a normalized, absolute path.
@param path file path (string)
@return absolute, normalized path (string)
"""
return os.path.abspath(path)
def normcaseabspath(path):
"""
Function returning an absolute path, that is normalized with respect to
its case and references.
@param path file path (string)
@return absolute, normalized path (string)
"""
return os.path.normcase(os.path.abspath(path))
def normjoinpath(a, *p):
"""
Function returning a normalized path of the joined parts passed into it.
@param a first path to be joined (string)
    @param p variable number of path parts to be joined (string)
@return normalized path (string)
"""
return os.path.normpath(os.path.join(a, *p))
def normabsjoinpath(a, *p):
"""
Function returning a normalized, absolute path of the joined parts passed
into it.
@param a first path to be joined (string)
    @param p variable number of path parts to be joined (string)
@return absolute, normalized path (string)
"""
return os.path.abspath(os.path.join(a, *p))
def relpath(path, start=os.path.curdir):
"""
Return a relative version of a path.
@param path path to make relative (string)
@param start path to make relative from (string)
@return relative path (string)
@exception ValueError raised to indicate an invalid path
"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list) - i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
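# Illustrative (POSIX separators): relpath('/a/b/c/file.txt', '/a/b')
# returns 'c/file.txt'.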
def isinpath(file):
"""
Function to check for an executable file.
@param file filename of the executable to check (string)
@return flag to indicate, if the executable file is accessible
via the searchpath defined by the PATH environment variable.
"""
if os.path.isabs(file):
return os.access(file, os.X_OK)
if os.path.exists(os.path.join(os.curdir, file)):
return os.access(os.path.join(os.curdir, file), os.X_OK)
path = getEnvironmentEntry('PATH')
# environment variable not defined
if path is None:
return False
dirs = path.split(os.pathsep)
for dir in dirs:
if os.access(os.path.join(dir, file), os.X_OK):
return True
return False
def startswithPath(path, start):
"""
Function to check, if a path starts with a given start path.
@param path path to be checked (string)
@param start start path (string)
@return flag indicating that the path starts with the given start
path (boolean)
"""
if start:
if path == start:
return True
elif normcasepath(toNativeSeparators(path)).startswith(
normcasepath(toNativeSeparators(start + "/"))):
return True
else:
return False
else:
return False
def relativePath(path, start):
"""
Function to convert a file path to a path relative to a start path.
@param path file or directory name to convert (string)
@param start start path (string)
@return relative path or unchanged path, if path does not start with
the start path (string)
"""
if startswithPath(path, start):
if path == start:
return ""
else:
return path[len(start) + 1:]
else:
return path
def relativeUniversalPath(path, start):
"""
Function to convert a file path to a path relative to a start path
with universal separators.
@param path file or directory name to convert (string)
@param start start path (string)
@return relative path or unchanged path, if path does not start with
the start path with universal separators (string)
"""
return fromNativeSeparators(relativePath(path, start))
def absolutePath(path, start):
"""
Public method to convert a path relative to a start path to an
absolute path.
@param path file or directory name to convert (string)
@param start start path (string)
@return absolute path (string)
"""
if not os.path.isabs(path):
path = os.path.join(start, path)
return path
def absoluteUniversalPath(path, start):
"""
Public method to convert a path relative to a start path with
universal separators to an absolute path.
@param path file or directory name to convert (string)
@param start start path (string)
@return absolute path with native separators (string)
"""
if not os.path.isabs(path):
path = toNativeSeparators(os.path.join(start, path))
return path
def getExecutablePath(file):
"""
Function to build the full path of an executable file from the environment.
@param file filename of the executable to check (string)
@return full executable name, if the executable file is accessible
via the searchpath defined by the PATH environment variable, or an
empty string otherwise.
"""
if os.path.isabs(file):
if os.access(file, os.X_OK):
return file
else:
return ""
cur_path = os.path.join(os.curdir, file)
if os.path.exists(cur_path):
if os.access(cur_path, os.X_OK):
return cur_path
path = os.getenv('PATH')
# environment variable not defined
if path is None:
return ""
dirs = path.split(os.pathsep)
for dir in dirs:
exe = os.path.join(dir, file)
if os.access(exe, os.X_OK):
return exe
return ""
def getExecutablePaths(file):
"""
Function to build all full path of an executable file from the environment.
@param file filename of the executable (string)
@return list of full executable names (list of strings), if the executable
file is accessible via the searchpath defined by the PATH environment
variable, or an empty list otherwise.
"""
paths = []
if os.path.isabs(file):
if os.access(file, os.X_OK):
return [file]
else:
return []
cur_path = os.path.join(os.curdir, file)
if os.path.exists(cur_path):
if os.access(cur_path, os.X_OK):
paths.append(cur_path)
path = os.getenv('PATH')
# environment variable not defined
if path is not None:
dirs = path.split(os.pathsep)
for dir in dirs:
exe = os.path.join(dir, file)
if os.access(exe, os.X_OK) and exe not in paths:
paths.append(exe)
return paths
def isExecutable(exe):
"""
Function to check, if a file is executable.
@param exe filename of the executable to check (string)
@return flag indicating executable status (boolean)
"""
return os.access(exe, os.X_OK)
def samepath(f1, f2):
"""
Function to compare two paths.
@param f1 first path for the compare (string)
@param f2 second path for the compare (string)
@return flag indicating whether the two paths represent the
same path on disk.
"""
if f1 is None or f2 is None:
return False
if normcaseabspath(os.path.realpath(f1)) == \
normcaseabspath(os.path.realpath(f2)):
return True
return False
def samefilepath(f1, f2):
"""
Function to compare two paths. Strips the filename.
@param f1 first filepath for the compare (string)
@param f2 second filepath for the compare (string)
@return flag indicating whether the two paths represent the
same path on disk.
"""
if f1 is None or f2 is None:
return False
if (normcaseabspath(os.path.dirname(os.path.realpath(f1))) ==
normcaseabspath(os.path.dirname(os.path.realpath(f2)))):
return True
return False
try:
EXTSEP = os.extsep
except AttributeError:
EXTSEP = "."
def splitPath(name):
"""
Function to split a pathname into a directory part and a file part.
@param name path name (string)
@return a tuple of 2 strings (dirname, filename).
"""
if os.path.isdir(name):
dn = os.path.abspath(name)
fn = "."
else:
dn, fn = os.path.split(name)
return (dn, fn)
def joinext(prefix, ext):
"""
Function to join a file extension to a path.
The leading "." of ext is replaced by a platform specific extension
separator if necessary.
@param prefix the basepart of the filename (string)
@param ext the extension part (string)
@return the complete filename (string)
"""
if ext[0] != ".":
ext = ".{0}".format(ext)
# require leading separator to match os.path.splitext
return prefix + EXTSEP + ext[1:]
def compactPath(path, width, measure=len):
"""
Function to return a compacted path fitting inside the given width.
@param path path to be compacted (string)
@param width width for the compacted path (integer)
@param measure reference to a function used to measure the length of the
string
@return compacted path (string)
"""
if measure(path) <= width:
return path
ellipsis = '...'
head, tail = os.path.split(path)
mid = len(head) // 2
head1 = head[:mid]
head2 = head[mid:]
while head1:
# head1 is same size as head2 or one shorter
path = os.path.join("{0}{1}{2}".format(head1, ellipsis, head2), tail)
if measure(path) <= width:
return path
head1 = head1[:-1]
head2 = head2[1:]
path = os.path.join(ellipsis, tail)
if measure(path) <= width:
return path
while tail:
path = "{0}{1}".format(ellipsis, tail)
if measure(path) <= width:
return path
tail = tail[1:]
return ""
def direntries(path, filesonly=False, pattern=None, followsymlinks=True,
checkStop=None):
"""
Function returning a list of all files and directories.
@param path root of the tree to check
@param filesonly flag indicating that only files are wanted
@param pattern a filename pattern to check against
@param followsymlinks flag indicating whether symbolic links
should be followed
@param checkStop function to be called to check for a stop
@return list of all files and directories in the tree rooted
at path. The names are expanded to start with path.
"""
if filesonly:
files = []
else:
files = [path]
try:
entries = os.listdir(path)
for entry in entries:
if checkStop and checkStop():
break
if entry in ['CVS', 'cvs',
'.svn', '_svn',
'.hg', '_hg',
'.ropeproject', '_ropeproject',
'.eric6project', '_eric6project',
'.issues', '_issues']:
continue
fentry = os.path.join(path, entry)
if pattern and \
not os.path.isdir(fentry) and \
not fnmatch.fnmatch(entry, pattern):
# entry doesn't fit the given pattern
continue
if os.path.isdir(fentry):
if os.path.islink(fentry) and not followsymlinks:
continue
files += direntries(
fentry, filesonly, pattern, followsymlinks, checkStop)
else:
files.append(fentry)
except OSError:
pass
except UnicodeDecodeError:
pass
return files
def getDirs(path, excludeDirs):
"""
Function returning a list of all directories below path.
@param path root of the tree to check
@param excludeDirs basename of directories to ignore
@return list of all directories found
"""
try:
names = os.listdir(path)
    except EnvironmentError:
        return []
dirs = []
for name in names:
if os.path.isdir(os.path.join(path, name)) and \
not os.path.islink(os.path.join(path, name)):
exclude = 0
for e in excludeDirs:
if name.split(os.sep, 1)[0] == e:
exclude = 1
break
if not exclude:
dirs.append(os.path.join(path, name))
for name in dirs[:]:
if not os.path.islink(name):
dirs = dirs + getDirs(name, excludeDirs)
return dirs
def getTestFileName(fn):
"""
Function to build the filename of a unittest file.
The filename for the unittest file is built by prepending
the string "test" to the filename passed into this function.
@param fn filename basis to be used for the unittest filename (string)
@return filename of the corresponding unittest file (string)
"""
dn, fn = os.path.split(fn)
return os.path.join(dn, "test{0}".format(fn))
def parseOptionString(s):
"""
Function used to convert an option string into a list of options.
    @param s option string (string)
@return list of options (list of strings)
"""
rx = QRegExp(r"""\s([\w=/-]*"[^"]+"|[\w=/-]*'[^']+'|[^\s]+)""")
s = re.sub(r"%[A-Z%]", _percentReplacementFunc, s)
return parseString(s, rx)
def parseEnvironmentString(s):
"""
Function used to convert an environment string into a list of environment
settings.
@param s environment string (string)
@return list of environment settings (list of strings)
"""
rx = QRegExp(r"""\s(\w+\+?=[^\s]+|\w+="[^"]+"|\w+='[^']+')""")
return parseString(s, rx)
def parseString(s, rx):
"""
Function used to convert a string into a list.
@param s string to be parsed (string)
@param rx regex defining the parse pattern (QRegExp)
@return list of parsed data (list of strings)
"""
olist = []
if not s.startswith(' '):
# prepare the string to fit our pattern
s = ' ' + s
pos = rx.indexIn(s)
while pos != -1:
cs = rx.cap(1)
if cs.startswith('"') or cs.startswith("'"):
cs = cs[1:-1]
olist.append(cs)
pos += rx.matchedLength()
pos = rx.indexIn(s, pos)
return olist
def _percentReplacementFunc(matchobj):
"""
Protected function called for replacing % codes.
@param matchobj matchobject for the code
@return replacement string
"""
return getPercentReplacement(matchobj.group(0))
def getPercentReplacement(code):
"""
Function to get the replacement for code.
@param code code indicator (string)
@return replacement string (string)
"""
if code in ["C", "%C"]:
# column of the cursor of the current editor
aw = e5App().getObject("ViewManager").activeWindow()
if aw is None:
column = -1
else:
column = aw.getCursorPosition()[1]
return "{0:d}".format(column)
elif code in ["D", "%D"]:
# directory of active editor
aw = e5App().getObject("ViewManager").activeWindow()
if aw is None:
dn = "not_available"
else:
fn = aw.getFileName()
if fn is None:
dn = "not_available"
else:
dn = os.path.dirname(fn)
return dn
elif code in ["F", "%F"]:
# filename (complete) of active editor
aw = e5App().getObject("ViewManager").activeWindow()
if aw is None:
fn = "not_available"
else:
fn = aw.getFileName()
if fn is None:
fn = "not_available"
return fn
elif code in ["H", "%H"]:
# home directory
return getHomeDir()
elif code in ["L", "%L"]:
# line of the cursor of the current editor
aw = e5App().getObject("ViewManager").activeWindow()
if aw is None:
line = 0
else:
line = aw.getCursorPosition()[0] + 1
return "{0:d}".format(line)
elif code in ["P", "%P"]:
# project path
projectPath = e5App().getObject("Project").getProjectPath()
if not projectPath:
projectPath = "not_available"
return projectPath
elif code in ["S", "%S"]:
# selected text of the current editor
aw = e5App().getObject("ViewManager").activeWindow()
if aw is None:
text = "not_available"
else:
text = aw.selectedText()
return text
elif code in ["U", "%U"]:
# username
un = getUserName()
if un is None:
return code
else:
return un
elif code in ["%", "%%"]:
# the percent sign
return "%"
else:
# unknown code, just return it
return code
def getPercentReplacementHelp():
"""
Function to get the help text for the supported %-codes.
    @return help text (string)
"""
return QCoreApplication.translate(
"Utilities",
"""<p>You may use %-codes as placeholders in the string."""
""" Supported codes are:"""
"""<table>"""
"""<tr><td>%C</td><td>column of the cursor of the current editor"""
"""</td></tr>"""
"""<tr><td>%D</td><td>directory of the current editor</td></tr>"""
"""<tr><td>%F</td><td>filename of the current editor</td></tr>"""
"""<tr><td>%H</td><td>home directory of the current user</td></tr>"""
"""<tr><td>%L</td><td>line of the cursor of the current editor"""
"""</td></tr>"""
"""<tr><td>%P</td><td>path of the current project</td></tr>"""
"""<tr><td>%S</td><td>selected text of the current editor</td></tr>"""
"""<tr><td>%U</td><td>username of the current user</td></tr>"""
"""<tr><td>%%</td><td>the percent sign</td></tr>"""
"""</table>"""
"""</p>""")
def getUserName():
"""
Function to get the user name.
@return user name (string)
"""
user = getpass.getuser()
if isWindowsPlatform():
if not user:
return win32_GetUserName()
else:
if sys.version_info[0] == 2:
user = user.decode(locale.getpreferredencoding())
return user
def getRealName():
"""
Function to get the real name of the user.
@return real name of the user (string)
"""
if isWindowsPlatform():
return win32_getRealName()
else:
import pwd
user = getpass.getuser()
return pwd.getpwnam(user).pw_gecos
def getHomeDir():
"""
    Function to get a user's home directory.
@return home directory (string)
"""
return QDir.homePath()
def getPythonLibPath():
"""
Function to determine the path to Python's library.
@return path to the Python library (string)
"""
pyFullVers = sys.version.split()[0]
vl = re.findall("[0-9.]*", pyFullVers)[0].split(".")
major = vl[0]
minor = vl[1]
pyVers = major + "." + minor
if isWindowsPlatform():
libDir = sys.prefix + "\\Lib"
else:
try:
syslib = sys.lib
except AttributeError:
syslib = "lib"
libDir = sys.prefix + "/" + syslib + "/python" + pyVers
return libDir
def getPythonVersion():
"""
Function to get the Python version (major, minor) as an integer value.
@return An integer representing major and minor version number (integer)
"""
return sys.hexversion >> 16
def determinePythonVersion(filename, source, editor=None):
"""
Function to determine the python version of a given file.
@param filename name of the file with extension (str)
@param source of the file (str)
@keyparam editor reference to the editor, if the file is opened
already (Editor object)
@return Python version if file is Python2 or Python3 (int)
"""
pyAssignment = {"Python": 2, "Python2": 2, "Python3": 3}
if not editor:
viewManager = e5App().getObject('ViewManager')
editor = viewManager.getOpenEditor(filename)
# Maybe the user has changed the language
if editor and editor.getFileType() in pyAssignment:
return pyAssignment[editor.getFileType()]
pyVer = 0
if filename:
if not source:
source = readEncodedFile(filename)[0]
flags = extractFlags(source)
ext = os.path.splitext(filename)[1]
py2Ext = Preferences.getPython("PythonExtensions")
py3Ext = Preferences.getPython("Python3Extensions")
project = e5App().getObject('Project')
basename = os.path.basename(filename)
if "FileType" in flags:
pyVer = pyAssignment.get(flags["FileType"], 0)
elif project.isOpen() and project.isProjectFile(filename):
language = project.getEditorLexerAssoc(basename)
if not language:
language = Preferences.getEditorLexerAssoc(basename)
if language in ['Python2', 'Python3']:
pyVer = pyAssignment[language]
if pyVer:
# Skip the next tests
pass
elif (Preferences.getProject("DeterminePyFromProject") and
project.isOpen() and
project.isProjectFile(filename) and
ext in py2Ext + py3Ext):
pyVer = pyAssignment.get(project.getProjectLanguage(), 0)
elif ext in py2Ext and ext not in py3Ext:
pyVer = 2
elif ext in py3Ext and ext not in py2Ext:
pyVer = 3
elif source:
if isinstance(source, basestring):
line0 = source.splitlines()[0]
else:
line0 = source[0]
if line0.startswith("#!"):
if "python3" in line0:
pyVer = 3
elif "python" in line0:
pyVer = 2
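        # fall back to the major version of the running interpreter for
        # files with a known Python extension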
if pyVer == 0 and ext in py2Ext + py3Ext:
pyVer = sys.version_info[0]
if editor and pyVer:
editor.filetype = "Python{0}".format(pyVer)
return pyVer
###############################################################################
# functions for environment handling
###############################################################################
def getEnvironmentEntry(key, default=None):
"""
Module function to get an environment entry.
@param key key of the requested environment entry (string)
@param default value to be returned, if the environment doesn't contain
the requested entry (string)
@return the requested entry or the default value, if the entry wasn't
found (string or None)
"""
filter = QRegExp("^{0}[ \t]*=".format(key))
if isWindowsPlatform():
filter.setCaseSensitivity(Qt.CaseInsensitive)
entries = [e for e in QProcess.systemEnvironment()
if filter.indexIn(e) != -1]
if not entries:
return default
# if there are multiple entries, just consider the first one
ename, val = entries[0].split("=", 1)
return val.strip()
def hasEnvironmentEntry(key):
"""
Module function to check, if the environment contains an entry.
@param key key of the requested environment entry (string)
@return flag indicating the presence of the requested entry (boolean)
"""
filter = QRegExp("^{0}[ \t]*=".format(key))
if isWindowsPlatform():
filter.setCaseSensitivity(Qt.CaseInsensitive)
entries = [e for e in QProcess.systemEnvironment()
if filter.indexIn(e) != -1]
return len(entries) > 0
###############################################################################
# Qt utility functions below
###############################################################################
def generateQtToolName(toolname):
"""
Module function to generate the executable name for a Qt tool like
designer.
@param toolname base name of the tool (string)
@return the Qt tool name without extension (string)
"""
return "{0}{1}{2}".format(Preferences.getQt("QtToolsPrefix4"),
toolname,
Preferences.getQt("QtToolsPostfix4")
)
def getQtMacBundle(toolname):
"""
Module function to determine the correct Mac OS X bundle name for Qt tools.
@param toolname plain name of the tool (e.g. "designer") (string)
@return bundle name of the Qt tool (string)
"""
qtDir = getQtBinariesPath()
bundles = [
os.path.join(
qtDir, 'bin', generateQtToolName(toolname.capitalize())) + ".app",
os.path.join(qtDir, 'bin', generateQtToolName(toolname)) + ".app",
os.path.join(
qtDir, generateQtToolName(toolname.capitalize())) + ".app",
os.path.join(qtDir, generateQtToolName(toolname)) + ".app",
]
for bundle in bundles:
if os.path.exists(bundle):
return bundle
return ""
def prepareQtMacBundle(toolname, version, args):
"""
Module function for starting Qt tools that are Mac OS X bundles.
@param toolname plain name of the tool (e.g. "designer") (string)
@param version indication for the requested version (Qt 4) (integer)
@param args name of input file for tool, if any (list of strings)
@return command-name and args for QProcess (tuple)
"""
if version != 4:
return ("", [])
fullBundle = getQtMacBundle(toolname)
if fullBundle == "":
return ("", [])
newArgs = []
newArgs.append("-a")
newArgs.append(fullBundle)
if args:
newArgs.append("--args")
newArgs += args
return ("open", newArgs)
###############################################################################
# PySide utility functions below
###############################################################################
def generatePySideToolPath(toolname):
"""
Module function to generate the executable path for a PySide tool.
@param toolname base name of the tool (string or QString)
@return the PySide tool path with extension (string)
"""
if isWindowsPlatform():
pysideInterpreter = checkPyside()
interpreter = sys.version_info[0] - 2
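        # maps the running interpreter's major version to an index into the
        # checkPyside() result: 0 -> Python2, 1 -> Python3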
hasPyside = pysideInterpreter[interpreter]
# if it isn't the internal interpreter, it has to be the external one
if not hasPyside:
interpreter = not interpreter
if interpreter:
prefix = os.path.dirname(
Preferences.getDebugger("Python3Interpreter"))
else:
prefix = os.path.dirname(
Preferences.getDebugger("PythonInterpreter"))
if toolname == "pyside-uic":
return os.path.join(prefix, "Scripts", toolname + '.exe')
else:
return os.path.join(prefix, "Lib", "site-packages", "PySide",
toolname + ".exe")
else:
return toolname
def checkPyside():
"""
Module function to check the presence of PySide.
@return list of two flags indicating the presence of PySide for Python2
and PySide for Python3 (boolean, boolean)
"""
pysideInformation = []
for interpreterName in ["PythonInterpreter", "Python3Interpreter"]:
interpreter = Preferences.getDebugger(interpreterName)
if interpreter == "" or not isinpath(interpreter):
hasPyside = False
else:
hasPyside = False
checker = os.path.join(getConfig('ericDir'),
"Utilities", "PySideImporter.py")
args = [checker]
proc = QProcess()
proc.setProcessChannelMode(QProcess.MergedChannels)
proc.start(interpreter, args)
finished = proc.waitForFinished(30000)
if finished:
if proc.exitCode() == 0:
hasPyside = True
pysideInformation.append(hasPyside)
return pysideInformation
###############################################################################
# Other utility functions below
###############################################################################
def generateVersionInfo(linesep='\n'):
"""
Module function to generate a string with various version infos.
@param linesep string to be used to separate lines (string)
@return string with version infos (string)
"""
try:
import sip
sip_version_str = sip.SIP_VERSION_STR
except (ImportError, AttributeError):
sip_version_str = "sip version not available"
info = "Version Numbers:{0} Python {1}{2}".format(
linesep, sys.version.split()[0], linesep)
info += " Qt {0}{1} PyQt {2}{3}".format(
qVersion(), linesep, PYQT_VERSION_STR, linesep)
info += " sip {0}{1} QScintilla {2}{3}".format(
sip_version_str, linesep, QSCINTILLA_VERSION_STR, linesep)
try:
from PyQt5.QtWebKit import qWebKitVersion
info += " WebKit {0}{1}".format(qWebKitVersion(), linesep)
except ImportError:
pass
info += " {0} {1}{2}".format(
Program, Version, linesep * 2)
info += "Platform: {0}{1}{2}{3}".format(
sys.platform, linesep, sys.version, linesep)
return info
def generatePluginsVersionInfo(linesep='\n'):
"""
Module function to generate a string with plugins version infos.
@param linesep string to be used to separate lines (string)
@return string with plugins version infos (string)
"""
infoStr = ""
app = e5App()
if app is not None:
try:
pm = app.getObject("PluginManager")
versions = {}
for info in pm.getPluginInfos():
versions[info[0]] = info[2]
infoStr = "Plugins Version Numbers:{0}".format(linesep)
for pluginName in sorted(versions.keys()):
infoStr += " {0} {1}{2}".format(
pluginName, versions[pluginName], linesep)
except KeyError:
pass
return infoStr
def generateDistroInfo(linesep='\n'):
"""
Module function to generate a string with distribution infos.
@param linesep string to be used to separate lines (string)
@return string with plugins version infos (string)
"""
infoStr = ""
if isLinuxPlatform():
releaseList = glob.glob("/etc/*-release")
if releaseList:
infoStr = "Distribution Info:{0}".format(linesep)
infoParas = []
for rfile in releaseList:
try:
f = open(rfile, "r")
lines = f.read().splitlines()
                    f.close()
except IOError:
continue
lines.insert(0, rfile)
infoParas.append(' ' + (linesep + ' ').join(lines))
infoStr += (linesep + linesep).join(infoParas)
return infoStr
def toBool(dataStr):
"""
Module function to convert a string to a boolean value.
@param dataStr string to be converted (string)
@return converted boolean value (boolean)
"""
if dataStr in ["True", "true", "1", "Yes", "yes"]:
return True
elif dataStr in ["False", "false", "0", "No", "no"]:
return False
else:
return bool(dataStr)
###############################################################################
# posix compatibility functions below
###############################################################################
# None right now
###############################################################################
# win32 compatibility functions below
###############################################################################
def win32_Kill(pid):
"""
Function to provide an os.kill equivalent for Win32.
@param pid process id (integer)
@return result of the kill (boolean)
"""
import win32api
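    # 1 is the PROCESS_TERMINATE access right; 0 means the handle is not
    # inheritable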
handle = win32api.OpenProcess(1, 0, pid)
return (0 != win32api.TerminateProcess(handle, 0))
def win32_GetUserName():
"""
Function to get the user name under Win32.
@return user name (string)
"""
try:
import win32api
return win32api.GetUserName()
except ImportError:
        # getEnvironmentEntry() returns its default instead of raising
        # KeyError, so check the result explicitly
        u = getEnvironmentEntry('USERNAME')
        if u is None:
            u = getEnvironmentEntry('username', None)
        return u
def win32_getRealName():
"""
Function to get the user's real name (aka. display name) under Win32.
@return real name of the current user (string)
"""
import ctypes
GetUserNameEx = ctypes.windll.secur32.GetUserNameExW
NameDisplay = 3
size = ctypes.pointer(ctypes.c_ulong(0))
GetUserNameEx(NameDisplay, None, size)
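    # the first call fails by design, writing the required buffer length
    # into 'size'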
nameBuffer = ctypes.create_unicode_buffer(size.contents.value)
GetUserNameEx(NameDisplay, nameBuffer, size)
return nameBuffer.value
|
proxysh/Safejumper-for-Desktop
|
refs/heads/master
|
buildlinux/env64/lib/python2.7/site-packages/twisted/test/process_twisted.py
|
13
|
"""A process that reads from stdin and out using Twisted."""
from __future__ import division, absolute_import, print_function
### Twisted Preamble
# This makes sure that users don't have to set up their environment
# specially in order to run these programs from bin/.
import sys, os
pos = os.path.abspath(sys.argv[0]).find(os.sep+'Twisted')
if pos != -1:
sys.path.insert(0, os.path.abspath(sys.argv[0])[:pos+8])
sys.path.insert(0, os.curdir)
### end of preamble
from twisted.python import log
from zope.interface import implementer
from twisted.internet import interfaces
log.startLogging(sys.stderr)
from twisted.internet import protocol, reactor, stdio
@implementer(interfaces.IHalfCloseableProtocol)
class Echo(protocol.Protocol):
def connectionMade(self):
print("connection made")
def dataReceived(self, data):
self.transport.write(data)
def readConnectionLost(self):
print("readConnectionLost")
self.transport.loseConnection()
def writeConnectionLost(self):
print("writeConnectionLost")
def connectionLost(self, reason):
print("connectionLost", reason)
reactor.stop()
stdio.StandardIO(Echo())
reactor.run()
|
amenonsen/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/ios/config/lldp_global/__init__.py
|
12133432
| |
coderabhishek/scrapy
|
refs/heads/master
|
scrapy/utils/__init__.py
|
12133432
| |
andela-bojengwa/talk
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/gis/db/__init__.py
|
12133432
| |
Meemaw/Eulers-Project
|
refs/heads/master
|
Problem_97.py
|
1
|
__author__ = 'Meemaw'
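# Project Euler 97: last ten digits of the non-Mersenne prime
# 28433 * 2**7830457 + 1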
stevilo = 28433 * (2**7830457) + 1
print(str(stevilo)[-10:])
|
kyleabeauchamp/fah-projects
|
refs/heads/master
|
old/fix_pdbs.py
|
2
|
import simtk.openmm.app as app
import pdbfixer
mutation_string = "GLY-112-ALA"
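# pdbfixer mutation strings follow "<wild-type residue>-<position>-<replacement residue>"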
fixer = pdbfixer.PDBFixer(pdbid='2LCB')
fixer.applyMutations([mutation_string])
fixer.findMissingResidues()
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.removeHeterogens(True)
fixer.addMissingHydrogens(7.0)
numChains = len(list(fixer.topology.chains()))
fixer.removeChains(range(1, numChains))
app.PDBFile.writeFile(fixer.topology, fixer.positions, open("./pdb_fixed/2LCB_%s.pdb" % mutation_string, 'w'))
fixer = pdbfixer.PDBFixer(pdbid='2LCB')
fixer.findMissingResidues()
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.removeHeterogens(True)
fixer.addMissingHydrogens(7.0)
numChains = len(list(fixer.topology.chains()))
fixer.removeChains(range(1, numChains))
app.PDBFile.writeFile(fixer.topology, fixer.positions, open("./pdb_fixed/2LCB.pdb", 'w'))
mutation_string = "GLY-112-ALA"
fixer = pdbfixer.PDBFixer(pdbid='3DMV')
fixer.applyMutations([mutation_string])
fixer.findMissingResidues()
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.removeHeterogens(True)
fixer.addMissingHydrogens(7.0)
numChains = len(list(fixer.topology.chains()))
fixer.removeChains(range(1, numChains))
app.PDBFile.writeFile(fixer.topology, fixer.positions, open("./pdb_fixed/3DMV_%s.pdb" % mutation_string, 'w'))
fixer = pdbfixer.PDBFixer(pdbid='3DMV')
fixer.findMissingResidues()
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.removeHeterogens(True)
fixer.addMissingHydrogens(7.0)
numChains = len(list(fixer.topology.chains()))
fixer.removeChains(range(1, numChains))
app.PDBFile.writeFile(fixer.topology, fixer.positions, open("./pdb_fixed/3DMV.pdb", 'w'))
|
btrent/knave
|
refs/heads/master
|
pychess/Utils/lutils/PolyglotHash.py
|
23
|
# -*- coding: UTF-8 -*-
# Polyglot opening books are indexed by 64-bit Zobrist hash keys.
# The standard specifies the following Zobrist seed values.
# The numbers in this file come from PolyGlot by Fabien Letouzey.
# PolyGlot is available under the GNU GPL from http://wbec-ridderkerk.nl
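# Indexed as pieceHashes[color][piece][square]; piece index 0 is an all-zero
# row, so empty squares contribute nothing to the hash.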
pieceHashes = [
[
[ 0x0000000000000000 ] * 64,
[ 0x5355f900c2a82dc7, 0x07fb9f855a997142, 0x5093417aa8a7ed5e, 0x7bcbc38da25a7f3c,
0x19fc8a768cf4b6d4, 0x637a7780decfc0d9, 0x8249a47aee0e41f7, 0x79ad695501e7d1e8,
0x14acbaf4777d5776, 0xf145b6beccdea195, 0xdabf2ac8201752fc, 0x24c3c94df9c8d3f6,
0xbb6e2924f03912ea, 0x0ce26c0b95c980d9, 0xa49cd132bfbf7cc4, 0xe99d662af4243939,
0x27e6ad7891165c3f, 0x8535f040b9744ff1, 0x54b3f4fa5f40d873, 0x72b12c32127fed2b,
0xee954d3c7b411f47, 0x9a85ac909a24eaa1, 0x70ac4cd9f04f21f5, 0xf9b89d3e99a075c2,
0x87b3e2b2b5c907b1, 0xa366e5b8c54f48b8, 0xae4a9346cc3f7cf2, 0x1920c04d47267bbd,
0x87bf02c6b49e2ae9, 0x092237ac237f3859, 0xff07f64ef8ed14d0, 0x8de8dca9f03cc54e,
0x9c1633264db49c89, 0xb3f22c3d0b0b38ed, 0x390e5fb44d01144b, 0x5bfea5b4712768e9,
0x1e1032911fa78984, 0x9a74acb964e78cb3, 0x4f80f7a035dafb04, 0x6304d09a0b3738c4,
0x2171e64683023a08, 0x5b9b63eb9ceff80c, 0x506aacf489889342, 0x1881afc9a3a701d6,
0x6503080440750644, 0xdfd395339cdbf4a7, 0xef927dbcf00c20f2, 0x7b32f7d1e03680ec,
0xb9fd7620e7316243, 0x05a7e8a57db91b77, 0xb5889c6e15630a75, 0x4a750a09ce9573f7,
0xcf464cec899a2f8a, 0xf538639ce705b824, 0x3c79a0ff5580ef7f, 0xede6c87f8477609d,
0x799e81f05bc93f31, 0x86536b8cf3428a8c, 0x97d7374c60087b73, 0xa246637cff328532,
0x043fcae60cc0eba0, 0x920e449535dd359e, 0x70eb093b15b290cc, 0x73a1921916591cbd,
],
[ 0xc547f57e42a7444e, 0x78e37644e7cad29e, 0xfe9a44e9362f05fa, 0x08bd35cc38336615,
0x9315e5eb3a129ace, 0x94061b871e04df75, 0xdf1d9f9d784ba010, 0x3bba57b68871b59d,
0xd2b7adeeded1f73f, 0xf7a255d83bc373f8, 0xd7f4f2448c0ceb81, 0xd95be88cd210ffa7,
0x336f52f8ff4728e7, 0xa74049dac312ac71, 0xa2f61bb6e437fdb5, 0x4f2a5cb07f6a35b3,
0x87d380bda5bf7859, 0x16b9f7e06c453a21, 0x7ba2484c8a0fd54e, 0xf3a678cad9a2e38c,
0x39b0bf7dde437ba2, 0xfcaf55c1bf8a4424, 0x18fcf680573fa594, 0x4c0563b89f495ac3,
0x40e087931a00930d, 0x8cffa9412eb642c1, 0x68ca39053261169f, 0x7a1ee967d27579e2,
0x9d1d60e5076f5b6f, 0x3810e399b6f65ba2, 0x32095b6d4ab5f9b1, 0x35cab62109dd038a,
0xa90b24499fcfafb1, 0x77a225a07cc2c6bd, 0x513e5e634c70e331, 0x4361c0ca3f692f12,
0xd941aca44b20a45b, 0x528f7c8602c5807b, 0x52ab92beb9613989, 0x9d1dfa2efc557f73,
0x722ff175f572c348, 0x1d1260a51107fe97, 0x7a249a57ec0c9ba2, 0x04208fe9e8f7f2d6,
0x5a110c6058b920a0, 0x0cd9a497658a5698, 0x56fd23c8f9715a4c, 0x284c847b9d887aae,
0x04feabfbbdb619cb, 0x742e1e651c60ba83, 0x9a9632e65904ad3c, 0x881b82a13b51b9e2,
0x506e6744cd974924, 0xb0183db56ffc6a79, 0x0ed9b915c66ed37e, 0x5e11e86d5873d484,
0xf678647e3519ac6e, 0x1b85d488d0f20cc5, 0xdab9fe6525d89021, 0x0d151d86adb73615,
0xa865a54edcc0f019, 0x93c42566aef98ffb, 0x99e7afeabe000731, 0x48cbff086ddf285a,
],
[ 0x23b70edb1955c4bf, 0xc330de426430f69d, 0x4715ed43e8a45c0a, 0xa8d7e4dab780a08d,
0x0572b974f03ce0bb, 0xb57d2e985e1419c7, 0xe8d9ecbe2cf3d73f, 0x2fe4b17170e59750,
0x11317ba87905e790, 0x7fbf21ec8a1f45ec, 0x1725cabfcb045b00, 0x964e915cd5e2b207,
0x3e2b8bcbf016d66d, 0xbe7444e39328a0ac, 0xf85b2b4fbcde44b7, 0x49353fea39ba63b1,
0x1dd01aafcd53486a, 0x1fca8a92fd719f85, 0xfc7c95d827357afa, 0x18a6a990c8b35ebd,
0xcccb7005c6b9c28d, 0x3bdbb92c43b17f26, 0xaa70b5b4f89695a2, 0xe94c39a54a98307f,
0xb7a0b174cff6f36e, 0xd4dba84729af48ad, 0x2e18bc1ad9704a68, 0x2de0966daf2f8b1c,
0xb9c11d5b1e43a07e, 0x64972d68dee33360, 0x94628d38d0c20584, 0xdbc0d2b6ab90a559,
0xd2733c4335c6a72f, 0x7e75d99d94a70f4d, 0x6ced1983376fa72b, 0x97fcaacbf030bc24,
0x7b77497b32503b12, 0x8547eddfb81ccb94, 0x79999cdff70902cb, 0xcffe1939438e9b24,
0x829626e3892d95d7, 0x92fae24291f2b3f1, 0x63e22c147b9c3403, 0xc678b6d860284a1c,
0x5873888850659ae7, 0x0981dcd296a8736d, 0x9f65789a6509a440, 0x9ff38fed72e9052f,
0xe479ee5b9930578c, 0xe7f28ecd2d49eecd, 0x56c074a581ea17fe, 0x5544f7d774b14aef,
0x7b3f0195fc6f290f, 0x12153635b2c0cf57, 0x7f5126dbba5e0ca7, 0x7a76956c3eafb413,
0x3d5774a11d31ab39, 0x8a1b083821f40cb4, 0x7b4a38e32537df62, 0x950113646d1d6e03,
0x4da8979a0041e8a9, 0x3bc36e078f7515d7, 0x5d0a12f27ad310d1, 0x7f9d1a2e1ebe1327,
],
[ 0xa09e8c8c35ab96de, 0xfa7e393983325753, 0xd6b6d0ecc617c699, 0xdfea21ea9e7557e3,
0xb67c1fa481680af8, 0xca1e3785a9e724e5, 0x1cfc8bed0d681639, 0xd18d8549d140caea,
0x4ed0fe7e9dc91335, 0xe4dbf0634473f5d2, 0x1761f93a44d5aefe, 0x53898e4c3910da55,
0x734de8181f6ec39a, 0x2680b122baa28d97, 0x298af231c85bafab, 0x7983eed3740847d5,
0x66c1a2a1a60cd889, 0x9e17e49642a3e4c1, 0xedb454e7badc0805, 0x50b704cab602c329,
0x4cc317fb9cddd023, 0x66b4835d9eafea22, 0x219b97e26ffc81bd, 0x261e4e4c0a333a9d,
0x1fe2cca76517db90, 0xd7504dfa8816edbb, 0xb9571fa04dc089c8, 0x1ddc0325259b27de,
0xcf3f4688801eb9aa, 0xf4f5d05c10cab243, 0x38b6525c21a42b0e, 0x36f60e2ba4fa6800,
0xeb3593803173e0ce, 0x9c4cd6257c5a3603, 0xaf0c317d32adaa8a, 0x258e5a80c7204c4b,
0x8b889d624d44885d, 0xf4d14597e660f855, 0xd4347f66ec8941c3, 0xe699ed85b0dfb40d,
0x2472f6207c2d0484, 0xc2a1e7b5b459aeb5, 0xab4f6451cc1d45ec, 0x63767572ae3d6174,
0xa59e0bd101731a28, 0x116d0016cb948f09, 0x2cf9c8ca052f6e9f, 0x0b090a7560a968e3,
0xabeeddb2dde06ff1, 0x58efc10b06a2068d, 0xc6e57a78fbd986e0, 0x2eab8ca63ce802d7,
0x14a195640116f336, 0x7c0828dd624ec390, 0xd74bbe77e6116ac7, 0x804456af10f5fb53,
0xebe9ea2adf4321c7, 0x03219a39ee587a30, 0x49787fef17af9924, 0xa1e9300cd8520548,
0x5b45e522e4b1b4ef, 0xb49c3b3995091a36, 0xd4490ad526f14431, 0x12a8f216af9418c2,
],
[ 0x6ffe73e81b637fb3, 0xddf957bc36d8b9ca, 0x64d0e29eea8838b3, 0x08dd9bdfd96b9f63,
0x087e79e5a57d1d13, 0xe328e230e3e2b3fb, 0x1c2559e30f0946be, 0x720bf5f26f4d2eaa,
0xb0774d261cc609db, 0x443f64ec5a371195, 0x4112cf68649a260e, 0xd813f2fab7f5c5ca,
0x660d3257380841ee, 0x59ac2c7873f910a3, 0xe846963877671a17, 0x93b633abfa3469f8,
0xc0c0f5a60ef4cdcf, 0xcaf21ecd4377b28c, 0x57277707199b8175, 0x506c11b9d90e8b1d,
0xd83cc2687a19255f, 0x4a29c6465a314cd1, 0xed2df21216235097, 0xb5635c95ff7296e2,
0x22af003ab672e811, 0x52e762596bf68235, 0x9aeba33ac6ecc6b0, 0x944f6de09134dfb6,
0x6c47bec883a7de39, 0x6ad047c430a12104, 0xa5b1cfdba0ab4067, 0x7c45d833aff07862,
0x5092ef950a16da0b, 0x9338e69c052b8e7b, 0x455a4b4cfe30e3f5, 0x6b02e63195ad0cf8,
0x6b17b224bad6bf27, 0xd1e0ccd25bb9c169, 0xde0c89a556b9ae70, 0x50065e535a213cf6,
0x9c1169fa2777b874, 0x78edefd694af1eed, 0x6dc93d9526a50e68, 0xee97f453f06791ed,
0x32ab0edb696703d3, 0x3a6853c7e70757a7, 0x31865ced6120f37d, 0x67fef95d92607890,
0x1f2b1d1f15f6dc9c, 0xb69e38a8965c6b65, 0xaa9119ff184cccf4, 0xf43c732873f24c13,
0xfb4a3d794a9a80d2, 0x3550c2321fd6109c, 0x371f77e76bb8417e, 0x6bfa9aae5ec05779,
0xcd04f3ff001a4778, 0xe3273522064480ca, 0x9f91508bffcfc14a, 0x049a7f41061a9e60,
0xfcb6be43a9f2fe9b, 0x08de8a1c7797da9b, 0x8f9887e6078735a1, 0xb5b4071dbfc73a66,
],
[ 0x55b6344cf97aafae, 0xb862225b055b6960, 0xcac09afbddd2cdb4, 0xdaf8e9829fe96b5f,
0xb5fdfc5d3132c498, 0x310cb380db6f7503, 0xe87fbb46217a360e, 0x2102ae466ebb1148,
0xf8549e1a3aa5e00d, 0x07a69afdcc42261a, 0xc4c118bfe78feaae, 0xf9f4892ed96bd438,
0x1af3dbe25d8f45da, 0xf5b4b0b0d2deeeb4, 0x962aceefa82e1c84, 0x046e3ecaaf453ce9,
0xf05d129681949a4c, 0x964781ce734b3c84, 0x9c2ed44081ce5fbd, 0x522e23f3925e319e,
0x177e00f9fc32f791, 0x2bc60a63a6f3b3f2, 0x222bbfae61725606, 0x486289ddcc3d6780,
0x7dc7785b8efdfc80, 0x8af38731c02ba980, 0x1fab64ea29a2ddf7, 0xe4d9429322cd065a,
0x9da058c67844f20c, 0x24c0e332b70019b0, 0x233003b5a6cfe6ad, 0xd586bd01c5c217f6,
0x5e5637885f29bc2b, 0x7eba726d8c94094b, 0x0a56a5f0bfe39272, 0xd79476a84ee20d06,
0x9e4c1269baa4bf37, 0x17efee45b0dee640, 0x1d95b0a5fcf90bc6, 0x93cbe0b699c2585d,
0x65fa4f227a2b6d79, 0xd5f9e858292504d5, 0xc2b5a03f71471a6f, 0x59300222b4561e00,
0xce2f8642ca0712dc, 0x7ca9723fbb2e8988, 0x2785338347f2ba08, 0xc61bb3a141e50e8c,
0x150f361dab9dec26, 0x9f6a419d382595f4, 0x64a53dc924fe7ac9, 0x142de49fff7a7c3d,
0x0c335248857fa9e7, 0x0a9c32d5eae45305, 0xe6c42178c4bbb92e, 0x71f1ce2490d20b07,
0xf1bcc3d275afe51a, 0xe728e8c83c334074, 0x96fbf83a12884624, 0x81a1549fd6573da5,
0x5fa7867caf35e149, 0x56986e2ef3ed091b, 0x917f1dd5f8886c61, 0xd20d8c88c8ffe65f,
],
],
[
[ 0x0000000000000000 ] * 64,
[ 0x9d39247e33776d41, 0x2af7398005aaa5c7, 0x44db015024623547, 0x9c15f73e62a76ae2,
0x75834465489c0c89, 0x3290ac3a203001bf, 0x0fbbad1f61042279, 0xe83a908ff2fb60ca,
0x0d7e765d58755c10, 0x1a083822ceafe02d, 0x9605d5f0e25ec3b0, 0xd021ff5cd13a2ed5,
0x40bdf15d4a672e32, 0x011355146fd56395, 0x5db4832046f3d9e5, 0x239f8b2d7ff719cc,
0x05d1a1ae85b49aa1, 0x679f848f6e8fc971, 0x7449bbff801fed0b, 0x7d11cdb1c3b7adf0,
0x82c7709e781eb7cc, 0xf3218f1c9510786c, 0x331478f3af51bbe6, 0x4bb38de5e7219443,
0xaa649c6ebcfd50fc, 0x8dbd98a352afd40b, 0x87d2074b81d79217, 0x19f3c751d3e92ae1,
0xb4ab30f062b19abf, 0x7b0500ac42047ac4, 0xc9452ca81a09d85d, 0x24aa6c514da27500,
0x4c9f34427501b447, 0x14a68fd73c910841, 0xa71b9b83461cbd93, 0x03488b95b0f1850f,
0x637b2b34ff93c040, 0x09d1bc9a3dd90a94, 0x3575668334a1dd3b, 0x735e2b97a4c45a23,
0x18727070f1bd400b, 0x1fcbacd259bf02e7, 0xd310a7c2ce9b6555, 0xbf983fe0fe5d8244,
0x9f74d14f7454a824, 0x51ebdc4ab9ba3035, 0x5c82c505db9ab0fa, 0xfcf7fe8a3430b241,
0x3253a729b9ba3dde, 0x8c74c368081b3075, 0xb9bc6c87167c33e7, 0x7ef48f2b83024e20,
0x11d505d4c351bd7f, 0x6568fca92c76a243, 0x4de0b0f40f32a7b8, 0x96d693460cc37e5d,
0x42e240cb63689f2f, 0x6d2bdcdae2919661, 0x42880b0236e4d951, 0x5f0f4a5898171bb6,
0x39f890f579f92f88, 0x93c5b5f47356388b, 0x63dc359d8d231b78, 0xec16ca8aea98ad76,
],
[ 0x56436c9fe1a1aa8d, 0xefac4b70633b8f81, 0xbb215798d45df7af, 0x45f20042f24f1768,
0x930f80f4e8eb7462, 0xff6712ffcfd75ea1, 0xae623fd67468aa70, 0xdd2c5bc84bc8d8fc,
0x7eed120d54cf2dd9, 0x22fe545401165f1c, 0xc91800e98fb99929, 0x808bd68e6ac10365,
0xdec468145b7605f6, 0x1bede3a3aef53302, 0x43539603d6c55602, 0xaa969b5c691ccb7a,
0xa87832d392efee56, 0x65942c7b3c7e11ae, 0xded2d633cad004f6, 0x21f08570f420e565,
0xb415938d7da94e3c, 0x91b859e59ecb6350, 0x10cff333e0ed804a, 0x28aed140be0bb7dd,
0xc5cc1d89724fa456, 0x5648f680f11a2741, 0x2d255069f0b7dab3, 0x9bc5a38ef729abd4,
0xef2f054308f6a2bc, 0xaf2042f5cc5c2858, 0x480412bab7f5be2a, 0xaef3af4a563dfe43,
0x19afe59ae451497f, 0x52593803dff1e840, 0xf4f076e65f2ce6f0, 0x11379625747d5af3,
0xbce5d2248682c115, 0x9da4243de836994f, 0x066f70b33fe09017, 0x4dc4de189b671a1c,
0x51039ab7712457c3, 0xc07a3f80c31fb4b4, 0xb46ee9c5e64a6e7c, 0xb3819a42abe61c87,
0x21a007933a522a20, 0x2df16f761598aa4f, 0x763c4a1371b368fd, 0xf793c46702e086a0,
0xd7288e012aeb8d31, 0xde336a2a4bc1c44b, 0x0bf692b38d079f23, 0x2c604a7a177326b3,
0x4850e73e03eb6064, 0xcfc447f1e53c8e1b, 0xb05ca3f564268d99, 0x9ae182c8bc9474e8,
0xa4fc4bd4fc5558ca, 0xe755178d58fc4e76, 0x69b97db1a4c03dfe, 0xf9b5b7c4acc67c96,
0xfc6a82d64b8655fb, 0x9c684cb6c4d24417, 0x8ec97d2917456ed0, 0x6703df9d2924e97e,
],
[ 0x7f9b6af1ebf78baf, 0x58627e1a149bba21, 0x2cd16e2abd791e33, 0xd363eff5f0977996,
0x0ce2a38c344a6eed, 0x1a804aadb9cfa741, 0x907f30421d78c5de, 0x501f65edb3034d07,
0x37624ae5a48fa6e9, 0x957baf61700cff4e, 0x3a6c27934e31188a, 0xd49503536abca345,
0x088e049589c432e0, 0xf943aee7febf21b8, 0x6c3b8e3e336139d3, 0x364f6ffa464ee52e,
0xd60f6dcedc314222, 0x56963b0dca418fc0, 0x16f50edf91e513af, 0xef1955914b609f93,
0x565601c0364e3228, 0xecb53939887e8175, 0xbac7a9a18531294b, 0xb344c470397bba52,
0x65d34954daf3cebd, 0xb4b81b3fa97511e2, 0xb422061193d6f6a7, 0x071582401c38434d,
0x7a13f18bbedc4ff5, 0xbc4097b116c524d2, 0x59b97885e2f2ea28, 0x99170a5dc3115544,
0x6f423357e7c6a9f9, 0x325928ee6e6f8794, 0xd0e4366228b03343, 0x565c31f7de89ea27,
0x30f5611484119414, 0xd873db391292ed4f, 0x7bd94e1d8e17debc, 0xc7d9f16864a76e94,
0x947ae053ee56e63c, 0xc8c93882f9475f5f, 0x3a9bf55ba91f81ca, 0xd9a11fbb3d9808e4,
0x0fd22063edc29fca, 0xb3f256d8aca0b0b9, 0xb03031a8b4516e84, 0x35dd37d5871448af,
0xe9f6082b05542e4e, 0xebfafa33d7254b59, 0x9255abb50d532280, 0xb9ab4ce57f2d34f3,
0x693501d628297551, 0xc62c58f97dd949bf, 0xcd454f8f19c5126a, 0xbbe83f4ecc2bdecb,
0xdc842b7e2819e230, 0xba89142e007503b8, 0xa3bc941d0a5061cb, 0xe9f6760e32cd8021,
0x09c7e552bc76492f, 0x852f54934da55cc9, 0x8107fccf064fcf56, 0x098954d51fff6580,
],
[ 0xda3a361b1c5157b1, 0xdcdd7d20903d0c25, 0x36833336d068f707, 0xce68341f79893389,
0xab9090168dd05f34, 0x43954b3252dc25e5, 0xb438c2b67f98e5e9, 0x10dcd78e3851a492,
0xdbc27ab5447822bf, 0x9b3cdb65f82ca382, 0xb67b7896167b4c84, 0xbfced1b0048eac50,
0xa9119b60369ffebd, 0x1fff7ac80904bf45, 0xac12fb171817eee7, 0xaf08da9177dda93d,
0x1b0cab936e65c744, 0xb559eb1d04e5e932, 0xc37b45b3f8d6f2ba, 0xc3a9dc228caac9e9,
0xf3b8b6675a6507ff, 0x9fc477de4ed681da, 0x67378d8eccef96cb, 0x6dd856d94d259236,
0xa319ce15b0b4db31, 0x073973751f12dd5e, 0x8a8e849eb32781a5, 0xe1925c71285279f5,
0x74c04bf1790c0efe, 0x4dda48153c94938a, 0x9d266d6a1cc0542c, 0x7440fb816508c4fe,
0x13328503df48229f, 0xd6bf7baee43cac40, 0x4838d65f6ef6748f, 0x1e152328f3318dea,
0x8f8419a348f296bf, 0x72c8834a5957b511, 0xd7a023a73260b45c, 0x94ebc8abcfb56dae,
0x9fc10d0f989993e0, 0xde68a2355b93cae6, 0xa44cfe79ae538bbe, 0x9d1d84fcce371425,
0x51d2b1ab2ddfb636, 0x2fd7e4b9e72cd38c, 0x65ca5b96b7552210, 0xdd69a0d8ab3b546d,
0x604d51b25fbf70e2, 0x73aa8a564fb7ac9e, 0x1a8c1e992b941148, 0xaac40a2703d9bea0,
0x764dbeae7fa4f3a6, 0x1e99b96e70a9be8b, 0x2c5e9deb57ef4743, 0x3a938fee32d29981,
0x26e6db8ffdf5adfe, 0x469356c504ec9f9d, 0xc8763c5b08d1908c, 0x3f6c6af859d80055,
0x7f7cc39420a3a545, 0x9bfb227ebdf4c5ce, 0x89039d79d6fc5c5c, 0x8fe88b57305e2ab6,
],
[ 0x001f837cc7350524, 0x1877b51e57a764d5, 0xa2853b80f17f58ee, 0x993e1de72d36d310,
0xb3598080ce64a656, 0x252f59cf0d9f04bb, 0xd23c8e176d113600, 0x1bda0492e7e4586e,
0x21e0bd5026c619bf, 0x3b097adaf088f94e, 0x8d14dedb30be846e, 0xf95cffa23af5f6f4,
0x3871700761b3f743, 0xca672b91e9e4fa16, 0x64c8e531bff53b55, 0x241260ed4ad1e87d,
0x106c09b972d2e822, 0x7fba195410e5ca30, 0x7884d9bc6cb569d8, 0x0647dfedcd894a29,
0x63573ff03e224774, 0x4fc8e9560f91b123, 0x1db956e450275779, 0xb8d91274b9e9d4fb,
0xa2ebee47e2fbfce1, 0xd9f1f30ccd97fb09, 0xefed53d75fd64e6b, 0x2e6d02c36017f67f,
0xa9aa4d20db084e9b, 0xb64be8d8b25396c1, 0x70cb6af7c2d5bcf0, 0x98f076a4f7a2322e,
0xbf84470805e69b5f, 0x94c3251f06f90cf3, 0x3e003e616a6591e9, 0xb925a6cd0421aff3,
0x61bdd1307c66e300, 0xbf8d5108e27e0d48, 0x240ab57a8b888b20, 0xfc87614baf287e07,
0xef02cdd06ffdb432, 0xa1082c0466df6c0a, 0x8215e577001332c8, 0xd39bb9c3a48db6cf,
0x2738259634305c14, 0x61cf4f94c97df93d, 0x1b6baca2ae4e125b, 0x758f450c88572e0b,
0x959f587d507a8359, 0xb063e962e045f54d, 0x60e8ed72c0dff5d1, 0x7b64978555326f9f,
0xfd080d236da814ba, 0x8c90fd9b083f4558, 0x106f72fe81e2c590, 0x7976033a39f7d952,
0xa4ec0132764ca04b, 0x733ea705fae4fa77, 0xb4d8f77bc3e56167, 0x9e21f4f903b33fd9,
0x9d765e419fb69f6d, 0xd30c088ba61ea5ef, 0x5d94337fbfaf7f5b, 0x1a4e4822eb4d7a59,
],
[ 0x230e343dfba08d33, 0x43ed7f5a0fae657d, 0x3a88a0fbbcb05c63, 0x21874b8b4d2dbc4f,
0x1bdea12e35f6a8c9, 0x53c065c6c8e63528, 0xe34a1d250e7a8d6b, 0xd6b04d3b7651dd7e,
0x5e90277e7cb39e2d, 0x2c046f22062dc67d, 0xb10bb459132d0a26, 0x3fa9ddfb67e2f199,
0x0e09b88e1914f7af, 0x10e8b35af3eeab37, 0x9eedeca8e272b933, 0xd4c718bc4ae8ae5f,
0x81536d601170fc20, 0x91b534f885818a06, 0xec8177f83f900978, 0x190e714fada5156e,
0xb592bf39b0364963, 0x89c350c893ae7dc1, 0xac042e70f8b383f2, 0xb49b52e587a1ee60,
0xfb152fe3ff26da89, 0x3e666e6f69ae2c15, 0x3b544ebe544c19f9, 0xe805a1e290cf2456,
0x24b33c9d7ed25117, 0xe74733427b72f0c1, 0x0a804d18b7097475, 0x57e3306d881edb4f,
0x4ae7d6a36eb5dbcb, 0x2d8d5432157064c8, 0xd1e649de1e7f268b, 0x8a328a1cedfe552c,
0x07a3aec79624c7da, 0x84547ddc3e203c94, 0x990a98fd5071d263, 0x1a4ff12616eefc89,
0xf6f7fd1431714200, 0x30c05b1ba332f41c, 0x8d2636b81555a786, 0x46c9feb55d120902,
0xccec0a73b49c9921, 0x4e9d2827355fc492, 0x19ebb029435dcb0f, 0x4659d2b743848a2c,
0x963ef2c96b33be31, 0x74f85198b05a2e7d, 0x5a0f544dd2b1fb18, 0x03727073c2e134b1,
0xc7f6aa2de59aea61, 0x352787baa0d7c22f, 0x9853eab63b5e0b35, 0xabbdcdd7ed5c0860,
0xcf05daf5ac8d77b0, 0x49cad48cebf4a71e, 0x7a4c10ec2158c4a6, 0xd9e92aa246bf719e,
0x13ae978d09fe5557, 0x730499af921549ff, 0x4e4b705b92903ba4, 0xff577222c14f0a3a,
],
],
]
epHashes = [0x70cc73d90bc26e24, 0xe21a6b35df0c3ad7, 0x003a93d8b2806962, 0x1c99ded33cb890a1,
0xcf3145de0add4289, 0xd0e4427a5514fb72, 0x77c621cc9fb3a483, 0x67a34dac4356550b ]
W_OOHash = 0x31d71dce64b2c310
W_OOOHash = 0xf165b587df898190
B_OOHash = 0xa57e6339dd2cf3a0
B_OOOHash = 0x1ef6e6dbb1961ec9
colorHash = 0xf8d626aaaf278509
|
xzYue/odoo
|
refs/heads/8.0
|
addons/base_iban/__init__.py
|
447
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_iban
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
victorywang80/Maintenance
|
refs/heads/master
|
saltstack/src/salt/scripts.py
|
1
|
# -*- coding: utf-8 -*-
'''
This module contains the function calls to execute command line scripts
'''
# Import python libs
import os
import sys
# Import salt libs
import salt
import salt.cli
def salt_master():
'''
Start the salt-master.
'''
master = salt.Master()
master.start()
def salt_minion():
'''
Kick off a salt minion daemon.
'''
if '' in sys.path:
sys.path.remove('')
minion = salt.Minion()
minion.start()
def salt_syndic():
'''
Kick off a salt syndic daemon.
'''
pid = os.getpid()
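    # remember our pid so the daemon can be sent SIGTERM (signal 15) on Ctrl-C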
try:
syndic = salt.Syndic()
syndic.start()
except KeyboardInterrupt:
os.kill(pid, 15)
def salt_key():
'''
Manage the authentication keys with salt-key.
'''
try:
saltkey = salt.cli.SaltKey()
saltkey.run()
except KeyboardInterrupt:
raise SystemExit('\nExiting gracefully on Ctrl-c')
def salt_cp():
'''
Publish commands to the salt system from the command line on the
master.
'''
try:
cp_ = salt.cli.SaltCP()
cp_.run()
except KeyboardInterrupt:
raise SystemExit('\nExiting gracefully on Ctrl-c')
def salt_call():
'''
Directly call a salt command in the modules, does not require a running
salt minion to run.
'''
if '' in sys.path:
sys.path.remove('')
try:
client = salt.cli.SaltCall()
client.run()
except KeyboardInterrupt:
raise SystemExit('\nExiting gracefully on Ctrl-c')
def salt_run():
'''
Execute a salt convenience routine.
'''
if '' in sys.path:
sys.path.remove('')
try:
client = salt.cli.SaltRun()
client.run()
except KeyboardInterrupt:
raise SystemExit('\nExiting gracefully on Ctrl-c')
def salt_ssh():
'''
Execute the salt-ssh system
'''
if '' in sys.path:
sys.path.remove('')
try:
client = salt.cli.SaltSSH()
client.run()
except KeyboardInterrupt:
raise SystemExit('\nExiting gracefully on Ctrl-c')
def salt_main():
'''
Publish commands to the salt system from the command line on the
master.
'''
if '' in sys.path:
sys.path.remove('')
try:
client = salt.cli.SaltCMD()
client.run()
except KeyboardInterrupt:
raise SystemExit('\nExiting gracefully on Ctrl-c')
|
brython-dev/brython
|
refs/heads/master
|
www/src/Lib/genericpath.py
|
25
|
"""
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile',
'samestat']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
os.stat(path)
except (OSError, ValueError):
return False
return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except (OSError, ValueError):
return False
return stat.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except (OSError, ValueError):
return False
return stat.S_ISDIR(st.st_mode)
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the metadata change time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
# Some people pass in a list of pathname parts to operate in an OS-agnostic
# fashion; don't try to translate in that case as that's an abuse of the
# API and they are already doing what they need to be OS-agnostic and so
# they most likely won't be using an os.PathLike object in the sublists.
if not isinstance(m[0], (list, tuple)):
m = tuple(map(os.fspath, m))
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
"""Test whether two stat buffers reference the same file"""
return (s1.st_ino == s2.st_ino and
s1.st_dev == s2.st_dev)
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
"""Test whether two pathnames reference the same actual file or directory
This is determined by the device number and i-node number and
raises an exception if an os.stat() call on either pathname fails.
"""
s1 = os.stat(f1)
s2 = os.stat(f2)
return samestat(s1, s2)
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
"""Test whether two open file objects reference the same file"""
s1 = os.fstat(fp1)
s2 = os.fstat(fp2)
return samestat(s1, s2)
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
# NOTE: This code must work for text and bytes strings.
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex:filenameIndex+1] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, p[:0]
def _check_arg_types(funcname, *args):
hasstr = hasbytes = False
for s in args:
if isinstance(s, str):
hasstr = True
elif isinstance(s, bytes):
hasbytes = True
else:
raise TypeError(f'{funcname}() argument must be str, bytes, or '
f'os.PathLike object, not {s.__class__.__name__!r}') from None
if hasstr and hasbytes:
raise TypeError("Can't mix strings and bytes in path components") from None
|
MPogson/Python_Koans
|
refs/heads/master
|
python2/runner/runner_tests/__init__.py
|
276
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Namespace: helpers_tests
|
golismero/golismero
|
refs/heads/master
|
thirdparty_libs/netaddr/ip/nmap.py
|
9
|
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2013, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""
Routines for dealing with nmap-style IPv4 address ranges.
Based on nmap's Target Specification :-
http://nmap.org/book/man-target-specification.html
"""
from netaddr.core import AddrFormatError
from netaddr.ip import IPAddress
from netaddr.compat import _iter_range, _is_str
#-----------------------------------------------------------------------------
def _nmap_octet_target_values(spec):
# Generates sequence of values for an individual octet as defined in the
# nmap Target Specification.
values = set()
for element in spec.split(','):
if '-' in element:
left, right = element.split('-', 1)
if not left:
left = 0
if not right:
right = 255
low = int(left)
high = int(right)
if not ((0 <= low <= 255) and (0 <= high <= 255)):
raise ValueError('octet value overflow for spec %s!' % spec)
if low > high:
                raise ValueError('left side of hyphen must be <= right %r' % element)
for octet in _iter_range(low, high + 1):
values.add(octet)
else:
octet = int(element)
if not (0 <= octet <= 255):
raise ValueError('octet value overflow for spec %s!' % spec)
values.add(octet)
return sorted(values)
#-----------------------------------------------------------------------------
def _generate_nmap_octet_ranges(nmap_target_spec):
# Generate 4 lists containing all octets defined by a given nmap Target
# specification.
if not _is_str(nmap_target_spec):
raise TypeError('string expected, not %s' % type(nmap_target_spec))
if not nmap_target_spec:
raise ValueError('nmap target specification cannot be blank!')
tokens = nmap_target_spec.split('.')
if len(tokens) != 4:
raise AddrFormatError('invalid nmap range: %s' % nmap_target_spec)
if tokens[0] == '-':
raise AddrFormatError('first octet cannot be a sole hyphen!')
return (_nmap_octet_target_values(tokens[0]),
_nmap_octet_target_values(tokens[1]),
_nmap_octet_target_values(tokens[2]),
_nmap_octet_target_values(tokens[3]))
#-----------------------------------------------------------------------------
def valid_nmap_range(nmap_target_spec):
"""
:param nmap_target_spec: an nmap-style IP range target specification.
:return: ``True`` if IP range target spec is valid, ``False`` otherwise.
"""
try:
_generate_nmap_octet_ranges(nmap_target_spec)
return True
except (TypeError, ValueError, AddrFormatError):
pass
return False
#-----------------------------------------------------------------------------
def iter_nmap_range(nmap_target_spec):
"""
The nmap security tool supports a custom type of IPv4 range using multiple
hyphenated octets. This generator provides iterators yielding IP addresses
according to this rule set.
:param nmap_target_spec: an nmap-style IP range target specification.
:return: an iterator producing IPAddress objects for each IP in the range.
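    Example: '192.168.0-1.1' expands to 192.168.0.1 and 192.168.1.1.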
"""
octet_ranges = _generate_nmap_octet_ranges(nmap_target_spec)
for w in octet_ranges[0]:
for x in octet_ranges[1]:
for y in octet_ranges[2]:
for z in octet_ranges[3]:
yield IPAddress("%d.%d.%d.%d" % (w, x, y, z))
|
proyectosdeley/proyectos_de_ley
|
refs/heads/master
|
proyectos_de_ley/pdl_scraper/spiders/proyecto_spider_2001.py
|
1
|
# -*- coding: utf-8 -*-
import re
import short_url
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from pdl_scraper.items import PdlScraperItem
LEGISLATURA = 2001
class ProyectoSpider2001(CrawlSpider):
name = "proyecto_2001"
allowed_domains = ["www2.congreso.gob.pe"]
rules = (
Rule(LinkExtractor(allow=('OpenDocument$',)), callback='parse_item'),
)
def __init__(self, *args, **kwargs):
super(ProyectoSpider2001, self).__init__(*args, **kwargs)
self.legislatura = LEGISLATURA
def start_requests(self):
base_url = (
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/CLProLey2001.nsf/PorNumeroInverso?OpenView&Start='
)
pages = range(1, 14900, 499)
for page in pages:
url = f'{base_url}{page}'
yield scrapy.Request(url=url)
def parse_item(self, response):
self.log("this is the url: %s" % response.url)
item = PdlScraperItem()
item['codigo'] = ''
item['legislatura'] = self.legislatura
item['numero_proyecto'] = ''
item['congresistas'] = ''
item['titulo'] = ''
item['short_url'] = ''
item['fecha_presentacion'] = ''
item['expediente'] = ''
item['seguimiento_page'] = ''
item['proponente'] = ''
item['grupo_parlamentario'] = ''
item['iniciativas_agrupadas'] = ''
item['nombre_comision'] = ''
item['titulo_de_ley'] = ''
item['numero_de_ley'] = ''
selectors = response.xpath("//input")
for sel in selectors:
attr_name = sel.xpath('@name').extract()[0]
if attr_name == 'CodIni':
item['codigo'] = sel.xpath('@value').extract()[0]
if attr_name == 'CodIni_web_1':
item['numero_proyecto'] = sel.xpath('@value').extract()[0]
if attr_name == 'DesPerio':
item['periodo'] = sel.xpath('@value').extract()[0]
if attr_name == 'DesLegis':
item['legislatura2'] = sel.xpath('@value').extract()[0]
if attr_name == 'fechapre':
item['fecha_presentacion'] = sel.xpath('@value').extract()[0]
if attr_name == 'DesPropo':
item['proponente'] = sel.xpath('@value').extract()[0]
if attr_name == 'DesGrupParla':
item['grupo_parlamentario'] = sel.xpath('@value').extract()[0]
if attr_name == 'TitIni':
item['titulo'] = sel.xpath('@value').extract()[0]
if attr_name == 'Titulo':
item['titulo2'] = sel.xpath('@value').extract()[0]
if attr_name == 'SumIni':
item['sumilla'] = sel.xpath('@value').extract()[0]
if attr_name == 'NomCongre':
item['congresistas'] = sel.xpath('@value').extract()[0]
if attr_name == 'CodIniSecu':
item['iniciativas_agrupadas'] = sel.xpath('@value').extract()[0]
if attr_name == 'NumLey':
item['numero_de_ley'] = sel.xpath('@value').extract()[0]
if attr_name == 'TitLey':
item['titulo_de_ley'] = sel.xpath('@value').extract()[0]
if attr_name == 'NombreDeLaComision':
item['nombre_comision'] = sel.xpath('@value').extract()[0]
item['expediente'] = "http://www2.congreso.gob.pe/sicr/tradocestproc/Expvirt_2011.nsf/" \
"visbusqptramdoc1621/{}?opendocument".format(item['codigo'])
item['seguimiento_page'] = response.url
for sel in response.xpath('//td[@width="112"]'):
if sel.xpath('font/text()').extract()[0] == 'Seguimiento':
item['seguimiento'] = sel.xpath('following-sibling::*//text()').extract()
item['short_url'] = self.create_shorturl(item['codigo'])
return item
# self.log("Worked on item %s." % str(item['codigo']))
# request = scrapy.Request(
# item['expediente'],
# callback=self.parse_pdfurl,
# )
# request.meta['item'] = item
# return request
def parse_pdfurl(self, response):
item = response.meta['item']
codigo = item['codigo']
for sel in response.xpath("//a"):
href = sel.xpath("@href").extract()[0]
            patterns = [
                r"\$FILE\/" + str(codigo) + r"\.pdf$",
                r"\$FILE\/.+" + str(codigo) + r"[0-9]+\.*-?\.pdf$",
                r"\$FILE\/.+" + str(codigo) + r"[0-9]+\.PDF$",
                r"\/PL" + str(codigo) + r"[0-9]+-?\.+pdf",
            ]
for pattern in patterns:
pattern = re.compile(pattern, re.IGNORECASE)
if re.search(pattern, href):
self.log("Found pdfurl for code: %s" % str(codigo))
item['pdf_url'] = href
return item
self.log("We failed to parse pdfurl for this project %s:" % str(codigo))
item['pdf_url'] = ''
return item
def create_shorturl(self, codigo):
"""
Use "legislatura" and codigo to build a short url.
:param codigo: Code for Proyecto de ley "03774"
:return: 4aw8ym
"""
mystring = "%s%s" % (self.legislatura, codigo)
url = short_url.encode_url(int(mystring))
return url
|
bzennn/blog_flask
|
refs/heads/master
|
python/lib/python3.5/site-packages/pip/commands/completion.py
|
343
|
from __future__ import absolute_import
import sys
from pip.basecommand import Command
BASE_COMPLETION = """
# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
"""
COMPLETION_SCRIPTS = {
'bash': """
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 ) )
}
complete -o default -F _pip_completion pip
""", 'zsh': """
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] ) )
}
compctl -K _pip_completion pip
""", 'fish': """
function __fish_complete_pip
set -lx COMP_WORDS (commandline -o) ""
set -lx COMP_CWORD (math (contains -i -- (commandline -t) $COMP_WORDS)-1)
set -lx PIP_AUTO_COMPLETE 1
string split \ -- (eval $COMP_WORDS[1])
end
complete -fa "(__fish_complete_pip)" -c pip
"""}
class CompletionCommand(Command):
"""A helper command to be used for command completion."""
name = 'completion'
summary = 'A helper command used for command completion.'
def __init__(self, *args, **kw):
super(CompletionCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--bash', '-b',
action='store_const',
const='bash',
dest='shell',
help='Emit completion code for bash')
cmd_opts.add_option(
'--zsh', '-z',
action='store_const',
const='zsh',
dest='shell',
help='Emit completion code for zsh')
cmd_opts.add_option(
'--fish', '-f',
action='store_const',
const='fish',
dest='shell',
help='Emit completion code for fish')
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
"""Prints the completion code of the given shell"""
shells = COMPLETION_SCRIPTS.keys()
shell_options = ['--' + shell for shell in sorted(shells)]
if options.shell in shells:
script = COMPLETION_SCRIPTS.get(options.shell, '')
print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
else:
sys.stderr.write(
'ERROR: You must pass %s\n' % ' or '.join(shell_options)
)
|
FNST-OpenStack/horizon
|
refs/heads/master
|
horizon/test/test_dashboards/cats/kittens/views.py
|
121
|
from horizon import views
class IndexView(views.APIView):
# A very simple class-based view...
template_name = 'cats/kittens/index.html'
def get_data(self, request, context, *args, **kwargs):
# Add data to the context here...
return context
|
ianmabie/uberpy
|
refs/heads/master
|
venv/lib/python2.7/site-packages/setuptools/__init__.py
|
136
|
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import distutils.core
import distutils.filelist
from distutils.core import Command as _Command
from distutils.util import convert_path
from fnmatch import fnmatchcase
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature, _get_unpatched
from setuptools.depends import Require
from setuptools.compat import filterfalse
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style)
path; it will be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
The list of included packages is built up first and then any
explicitly excluded packages are removed from it.
"""
out = cls._find_packages_iter(convert_path(where))
out = cls.require_parents(out)
includes = cls._build_filter(*include)
excludes = cls._build_filter('ez_setup', '*__pycache__', *exclude)
out = filter(includes, out)
out = filterfalse(excludes, out)
return list(out)
@staticmethod
def require_parents(packages):
"""
Exclude any apparent package whose parent package is not also
present.
For example, exclude 'foo.bar' if 'foo' is not present.
"""
found = []
for pkg in packages:
base, sep, child = pkg.rpartition('.')
if base and base not in found:
continue
found.append(pkg)
yield pkg
@staticmethod
def _candidate_dirs(base_path):
"""
Return all dirs in base_path that might be packages.
"""
has_dot = lambda name: '.' in name
for root, dirs, files in os.walk(base_path, followlinks=True):
# Exclude directories that contain a period, as they cannot be
# packages. Mutate the list to avoid traversal.
dirs[:] = filterfalse(has_dot, dirs)
for dir in dirs:
yield os.path.relpath(os.path.join(root, dir), base_path)
@classmethod
def _find_packages_iter(cls, base_path):
candidates = cls._candidate_dirs(base_path)
return (
path.replace(os.path.sep, '.')
for path in candidates
if cls._looks_like_package(os.path.join(base_path, path))
)
@staticmethod
def _looks_like_package(path):
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
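# A minimal usage sketch (the package names are hypothetical): collect every
# package under the project root except tests, as one would in a setup.py.
#
# from setuptools import setup, find_packages
# setup(
#     name='example',
#     version='0.1',
#     packages=find_packages(exclude=('tests', 'tests.*')),
# )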
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
# Add support for keyword arguments
_Command.__init__(self,dist)
for k,v in kw.items():
setattr(self,k,v)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
for k,v in kw.items():
setattr(cmd,k,v) # update command with keywords
return cmd
distutils.core.Command = Command # we can't patch distutils.cmd, alas
def findall(dir = os.curdir):
"""Find all files under 'dir' and return the list of full filenames
(relative to 'dir').
"""
all_files = []
for base, dirs, files in os.walk(dir, followlinks=True):
if base==os.curdir or base.startswith(os.curdir+os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files
distutils.filelist.findall = findall # fix findall bug in distutils.
|
XiaochenCui/algorithm_submit
|
refs/heads/cuixiaochen
|
migrations/versions/7725bf413d_add_file_column.py
|
1
|
"""add file column
Revision ID: 7725bf413d
Revises: 29ba7ffdd86
Create Date: 2016-06-15 15:28:57.442712
"""
# revision identifiers, used by Alembic.
revision = '7725bf413d'
down_revision = '29ba7ffdd86'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('file_columns',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.add_column('file', sa.Column('file_column_id', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('file', 'file_column_id')
op.drop_table('file_columns')
### end Alembic commands ###
|
barbagroup/PetIBM
|
refs/heads/master
|
examples/ibpm/cylinder2dRe550_GPU/scripts/plotVorticity.py
|
6
|
"""
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 1200 time steps (3 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
# Read vorticity field and its grid from files.
name = 'wz'
filepath = data_dir / 'grid.h5'
f = h5py.File(filepath, 'r')
x, y = f[name]['x'][:], f[name]['y'][:]
X, Y = numpy.meshgrid(x, y)
timestep = 1200
filepath = data_dir / '{:0>7}.h5'.format(timestep)
f = h5py.File(filepath, 'r')
wz = f[name][:]
# Read body coordinates from file.
filepath = simu_dir / 'circle.body'
with open(filepath, 'r') as infile:
xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, skiprows=1)
pyplot.rc('font', family='serif', size=16)
# Plot the filled contour of the vorticity.
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
levels = numpy.linspace(-32.0, 32.0, 32)
ax.contour(X, Y, wz, levels=levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-0.6, 1.6)
ax.set_ylim(-0.8, 0.8)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'wz{:0>7}.png'.format(timestep)
fig.savefig(str(filepath), dpi=300)
|
canglade/NLP
|
refs/heads/master
|
endpoints/bookstore-grpc/status.py
|
8
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import status
@contextmanager
def context(grpc_context):
"""A context manager that automatically handles KeyError."""
try:
yield
except KeyError as key_error:
grpc_context.code(status.Code.NOT_FOUND)
grpc_context.details(
'Unable to find the item keyed by {}'.format(key_error))
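# A minimal usage sketch (the servicer method and the `shelves` mapping are
# hypothetical, not part of this module): any KeyError raised inside the
# block is reported back to the caller as NOT_FOUND.
#
# def GetShelf(self, request, grpc_context):
#     with context(grpc_context):
#         return shelves[request.shelf]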
|
arnavd96/Cinemiezer
|
refs/heads/master
|
myvenv/lib/python3.4/site-packages/oauth2/__init__.py
|
12
|
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
from hashlib import sha1
import time
import random
import hmac
import binascii
import httplib2
from ._compat import PY3
from ._compat import b
from ._compat import parse_qs
from ._compat import quote
from ._compat import STRING_TYPES
from ._compat import TEXT
from ._compat import u
from ._compat import unquote
from ._compat import unquote_to_bytes
from ._compat import urlencode
from ._compat import urlsplit
from ._compat import urlunsplit
from ._compat import urlparse
from ._compat import urlunparse
from ._version import __version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.items()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, TEXT):
if not isinstance(s, bytes):
raise TypeError('You are required to pass either unicode or '
'bytes here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError as le:
raise TypeError('You are required to pass either a unicode '
'object or a utf-8-encoded bytes string here. '
'You passed a bytes object which contained '
'non-utf-8: %r. The UnicodeDecodeError that '
'resulted from attempting to interpret it as '
'utf-8 was: %s'
% (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, STRING_TYPES):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, STRING_TYPES):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, STRING_TYPES):
return to_unicode(x)
try:
l = list(x)
except TypeError as e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes, or if x is
an iterable which contains such a str.
"""
if isinstance(x, STRING_TYPES):
return to_utf8(x)
try:
l = list(x)
except TypeError as e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
if not isinstance(s, bytes):
s = s.encode('utf-8')
return quote(s, safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.SystemRandom().randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.SystemRandom().randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
items = [
('oauth_token', self.key),
('oauth_token_secret', self.secret),
]
if self.callback_confirmed is not None:
items.append(('oauth_callback_confirmed', self.callback_confirmed))
return urlencode(items)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(u(s), keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
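# A minimal round-trip sketch (the key/secret values are hypothetical):
# to_string() serializes the credential as a query string and from_string()
# rebuilds an equivalent token.
#
# token = Token('request-key', 'request-secret')
# same = Token.from_string(token.to_string())
# assert same.key == token.key and same.secret == token.secret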
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body=b'', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.items():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, query, fragment = urlsplit(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlunsplit((scheme, netloc, path, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.items()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(v)) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
items = []
for k, v in sorted(self.items()): # predictable for testing
items.append((k.encode('utf-8'), to_utf8_optional_iterator(v)))
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urlencode(items, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse(self.url)
if PY3:
query = parse_qs(base_url.query)
for k, v in self.items():
query.setdefault(k, []).append(to_utf8_optional_iterator(v))
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
else:
query = parse_qs(to_utf8(base_url.query))
for k, v in self.items():
query.setdefault(to_utf8(k), []).append(to_utf8_optional_iterator(v))
scheme = to_utf8(base_url.scheme)
netloc = to_utf8(base_url.netloc)
path = to_utf8(base_url.path)
params = to_utf8(base_url.params)
fragment = to_utf8(base_url.fragment)
url = (scheme, netloc, path, params, urlencode(query, True), fragment)
return urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.items():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, STRING_TYPES):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError as e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8_optional_iterator(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urlencode(items, True)
# Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha1(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.SystemRandom().randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers:
auth_header = None
for k, v in headers.items():
if k.lower() == 'authorization' or \
k.upper() == 'HTTP_AUTHORIZATION':
auth_header = v
# Check that the authorization header is OAuth.
if auth_header and auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except Exception:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body=b'', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return cls(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
if not PY3:
# If passed unicode with quoted UTF8, Python2's parse_qs leaves
# mojibake'd unicode after unquoting, so encode first.
param_str = b(param_str, 'utf-8')
parameters = parse_qs(param_str, keep_blank_values=True)
for k, v in parameters.items():
if len(v) == 1:
parameters[k] = unquote(v[0])
else:
parameters[k] = sorted([unquote(s) for s in v])
return parameters
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, **kwargs):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
super(Client, self).__init__(**kwargs)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body=b'', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = parse_qs(body)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
scheme, netloc, path, params, query, fragment = urlparse(uri)
realm = urlunparse((scheme, netloc, '', None, None, None))
if is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
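# A minimal two-legged client sketch (the credentials and URL are
# hypothetical): the Client signs the request with HMAC-SHA1 before
# delegating to httplib2.
#
# consumer = Consumer('my-key', 'my-secret')
# client = Client(consumer)
# resp, content = client.request('https://api.example.com/resource')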
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except Error:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
signature_method = request.get('oauth_signature_method')
if signature_method is None:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
return self.signature_methods[signature_method]
except KeyError:
signature_method_names = ', '.join(self.signature_methods.keys())
raise Error('Signature method %s not supported; try one of the '
'following: %s'
% (signature_method, signature_method_names))
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
signature = request.get('oauth_signature')
if signature is None:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %d have a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
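# A minimal server-side sketch (the request is assumed to have been built
# with Request.from_request, and consumer/token looked up from storage):
# register HMAC-SHA1, then verify the incoming call.
#
# server = Server()
# server.add_signature_method(SignatureMethod_HMAC_SHA1())
# extra_params = server.verify_request(request, consumer, token)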
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token): #pragma NO COVER
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token): #pragma NO COVER
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if (not hasattr(request, 'normalized_url') or request.normalized_url is None):
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key.encode('ascii'), raw.encode('ascii')
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha1)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw.encode('utf8')
|
catapult-project/catapult-csm
|
refs/heads/master
|
third_party/gsutil/third_party/oauth2client/tests/test_file.py
|
17
|
#!/usr/bin/python2.4
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oauth2client.file tests
Unit tests for oauth2client.file
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import datetime
import json
import os
import pickle
import stat
import tempfile
import unittest
from .http_mock import HttpMockSequence
import six
from oauth2client import file
from oauth2client import locked_file
from oauth2client import multistore_file
from oauth2client import util
from oauth2client.client import AccessTokenCredentials
from oauth2client.client import OAuth2Credentials
from six.moves import http_client
try:
# Python2
from future_builtins import oct
except ImportError:
pass
FILENAME = tempfile.mktemp('oauth2client_test.data')
class OAuth2ClientFileTests(unittest.TestCase):
def tearDown(self):
try:
os.unlink(FILENAME)
except OSError:
pass
def setUp(self):
try:
os.unlink(FILENAME)
except OSError:
pass
def create_test_credentials(self, client_id='some_client_id',
expiration=None):
access_token = 'foo'
client_secret = 'cOuDdkfjxxnv+'
refresh_token = '1/0/a.df219fjls0'
token_expiry = expiration or datetime.datetime.utcnow()
token_uri = 'https://www.google.com/accounts/o8/oauth2/token'
user_agent = 'refresh_checker/1.0'
credentials = OAuth2Credentials(
access_token, client_id, client_secret,
refresh_token, token_expiry, token_uri,
user_agent)
return credentials
def test_non_existent_file_storage(self):
s = file.Storage(FILENAME)
credentials = s.get()
self.assertEquals(None, credentials)
def test_no_sym_link_credentials(self):
if hasattr(os, 'symlink'):
SYMFILENAME = FILENAME + '.sym'
os.symlink(FILENAME, SYMFILENAME)
s = file.Storage(SYMFILENAME)
try:
s.get()
self.fail('Should have raised an exception.')
except file.CredentialsFileSymbolicLinkError:
pass
finally:
os.unlink(SYMFILENAME)
def test_pickle_and_json_interop(self):
# Write a file with a pickled OAuth2Credentials.
credentials = self.create_test_credentials()
f = open(FILENAME, 'wb')
pickle.dump(credentials, f)
f.close()
# Storage should not be able to read that object, as the capability to
# read and write credentials as pickled objects has been removed.
s = file.Storage(FILENAME)
read_credentials = s.get()
self.assertEquals(None, read_credentials)
# Now write it back out and confirm it has been rewritten as JSON
s.put(credentials)
with open(FILENAME) as f:
data = json.load(f)
self.assertEquals(data['access_token'], 'foo')
self.assertEquals(data['_class'], 'OAuth2Credentials')
self.assertEquals(data['_module'], OAuth2Credentials.__module__)
def test_token_refresh_store_expired(self):
expiration = datetime.datetime.utcnow() - datetime.timedelta(minutes=15)
credentials = self.create_test_credentials(expiration=expiration)
s = file.Storage(FILENAME)
s.put(credentials)
credentials = s.get()
new_cred = copy.copy(credentials)
new_cred.access_token = 'bar'
s.put(new_cred)
access_token = '1/3w'
token_response = {'access_token': access_token, 'expires_in': 3600}
http = HttpMockSequence([
({'status': '200'}, json.dumps(token_response).encode('utf-8')),
])
credentials._refresh(http.request)
self.assertEquals(credentials.access_token, access_token)
def test_token_refresh_store_expires_soon(self):
# Tests the case where an access token that is valid when it is read from
# the store expires before the original request succeeds.
expiration = datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
credentials = self.create_test_credentials(expiration=expiration)
s = file.Storage(FILENAME)
s.put(credentials)
credentials = s.get()
new_cred = copy.copy(credentials)
new_cred.access_token = 'bar'
s.put(new_cred)
access_token = '1/3w'
token_response = {'access_token': access_token, 'expires_in': 3600}
http = HttpMockSequence([
({'status': str(http_client.UNAUTHORIZED)}, b'Initial token expired'),
({'status': str(http_client.UNAUTHORIZED)}, b'Store token expired'),
({'status': str(http_client.OK)},
json.dumps(token_response).encode('utf-8')),
({'status': str(http_client.OK)},
b'Valid response to original request')
])
credentials.authorize(http)
http.request('https://example.com')
self.assertEqual(credentials.access_token, access_token)
def test_token_refresh_good_store(self):
expiration = datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
credentials = self.create_test_credentials(expiration=expiration)
s = file.Storage(FILENAME)
s.put(credentials)
credentials = s.get()
new_cred = copy.copy(credentials)
new_cred.access_token = 'bar'
s.put(new_cred)
credentials._refresh(lambda x: x)
self.assertEquals(credentials.access_token, 'bar')
def test_token_refresh_stream_body(self):
expiration = datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
credentials = self.create_test_credentials(expiration=expiration)
s = file.Storage(FILENAME)
s.put(credentials)
credentials = s.get()
new_cred = copy.copy(credentials)
new_cred.access_token = 'bar'
s.put(new_cred)
valid_access_token = '1/3w'
token_response = {'access_token': valid_access_token, 'expires_in': 3600}
http = HttpMockSequence([
({'status': str(http_client.UNAUTHORIZED)}, b'Initial token expired'),
({'status': str(http_client.UNAUTHORIZED)}, b'Store token expired'),
({'status': str(http_client.OK)},
json.dumps(token_response).encode('utf-8')),
({'status': str(http_client.OK)}, 'echo_request_body')
])
body = six.StringIO('streaming body')
credentials.authorize(http)
_, content = http.request('https://example.com', body=body)
self.assertEqual(content, 'streaming body')
self.assertEqual(credentials.access_token, valid_access_token)
def test_credentials_delete(self):
credentials = self.create_test_credentials()
s = file.Storage(FILENAME)
s.put(credentials)
credentials = s.get()
self.assertNotEquals(None, credentials)
s.delete()
credentials = s.get()
self.assertEquals(None, credentials)
def test_access_token_credentials(self):
access_token = 'foo'
user_agent = 'refresh_checker/1.0'
credentials = AccessTokenCredentials(access_token, user_agent)
s = file.Storage(FILENAME)
credentials = s.put(credentials)
credentials = s.get()
self.assertNotEquals(None, credentials)
self.assertEquals('foo', credentials.access_token)
mode = os.stat(FILENAME).st_mode
if os.name == 'posix':
self.assertEquals('0o600', oct(stat.S_IMODE(mode)))
def test_read_only_file_fail_lock(self):
credentials = self.create_test_credentials()
open(FILENAME, 'a+b').close()
os.chmod(FILENAME, 0o400)
store = multistore_file.get_credential_storage(
FILENAME,
credentials.client_id,
credentials.user_agent,
['some-scope', 'some-other-scope'])
store.put(credentials)
if os.name == 'posix':
self.assertTrue(store._multistore._read_only)
os.chmod(FILENAME, 0o600)
def test_multistore_no_symbolic_link_files(self):
if hasattr(os, 'symlink'):
SYMFILENAME = FILENAME + 'sym'
os.symlink(FILENAME, SYMFILENAME)
store = multistore_file.get_credential_storage(
SYMFILENAME,
'some_client_id',
'user-agent/1.0',
['some-scope', 'some-other-scope'])
try:
store.get()
self.fail('Should have raised an exception.')
except locked_file.CredentialsFileSymbolicLinkError:
pass
finally:
os.unlink(SYMFILENAME)
def test_multistore_non_existent_file(self):
store = multistore_file.get_credential_storage(
FILENAME,
'some_client_id',
'user-agent/1.0',
['some-scope', 'some-other-scope'])
credentials = store.get()
self.assertEquals(None, credentials)
def test_multistore_file(self):
credentials = self.create_test_credentials()
store = multistore_file.get_credential_storage(
FILENAME,
credentials.client_id,
credentials.user_agent,
['some-scope', 'some-other-scope'])
store.put(credentials)
credentials = store.get()
self.assertNotEquals(None, credentials)
self.assertEquals('foo', credentials.access_token)
store.delete()
credentials = store.get()
self.assertEquals(None, credentials)
if os.name == 'posix':
self.assertEquals('0o600', oct(stat.S_IMODE(os.stat(FILENAME).st_mode)))
def test_multistore_file_custom_key(self):
credentials = self.create_test_credentials()
custom_key = {'myapp': 'testing', 'clientid': 'some client'}
store = multistore_file.get_credential_storage_custom_key(
FILENAME, custom_key)
store.put(credentials)
stored_credentials = store.get()
self.assertNotEquals(None, stored_credentials)
self.assertEqual(credentials.access_token, stored_credentials.access_token)
store.delete()
stored_credentials = store.get()
self.assertEquals(None, stored_credentials)
def test_multistore_file_custom_string_key(self):
credentials = self.create_test_credentials()
# store with string key
store = multistore_file.get_credential_storage_custom_string_key(
FILENAME, 'mykey')
store.put(credentials)
stored_credentials = store.get()
self.assertNotEquals(None, stored_credentials)
self.assertEqual(credentials.access_token, stored_credentials.access_token)
# try retrieving with a dictionary
store_dict = multistore_file.get_credential_storage_custom_string_key(
FILENAME, {'key': 'mykey'})
stored_credentials = store_dict.get()
self.assertNotEquals(None, stored_credentials)
self.assertEqual(credentials.access_token, stored_credentials.access_token)
store.delete()
stored_credentials = store.get()
self.assertEquals(None, stored_credentials)
def test_multistore_file_backwards_compatibility(self):
credentials = self.create_test_credentials()
scopes = ['scope1', 'scope2']
# store the credentials using the legacy key method
store = multistore_file.get_credential_storage(
FILENAME, 'client_id', 'user_agent', scopes)
store.put(credentials)
# retrieve the credentials using a custom key that matches the legacy key
key = {'clientId': 'client_id', 'userAgent': 'user_agent',
'scope': util.scopes_to_string(scopes)}
store = multistore_file.get_credential_storage_custom_key(FILENAME, key)
stored_credentials = store.get()
self.assertEqual(credentials.access_token, stored_credentials.access_token)
def test_multistore_file_get_all_keys(self):
# start with no keys
keys = multistore_file.get_all_credential_keys(FILENAME)
self.assertEquals([], keys)
# store credentials
credentials = self.create_test_credentials(client_id='client1')
custom_key = {'myapp': 'testing', 'clientid': 'client1'}
store1 = multistore_file.get_credential_storage_custom_key(
FILENAME, custom_key)
store1.put(credentials)
keys = multistore_file.get_all_credential_keys(FILENAME)
self.assertEquals([custom_key], keys)
# store more credentials
credentials = self.create_test_credentials(client_id='client2')
string_key = 'string_key'
store2 = multistore_file.get_credential_storage_custom_string_key(
FILENAME, string_key)
store2.put(credentials)
keys = multistore_file.get_all_credential_keys(FILENAME)
self.assertEquals(2, len(keys))
self.assertTrue(custom_key in keys)
self.assertTrue({'key': string_key} in keys)
# back to no keys
store1.delete()
store2.delete()
keys = multistore_file.get_all_credential_keys(FILENAME)
self.assertEquals([], keys)
if __name__ == '__main__':
unittest.main()
|
lasalesi/erpnext
|
refs/heads/develop
|
erpnext/patches/v6_24/set_recurring_id.py
|
48
|
import frappe
def execute():
for doctype in ('Sales Order', 'Purchase Order', 'Sales Invoice',
'Purchase Invoice'):
frappe.reload_doctype(doctype)
frappe.db.sql('''update `tab{0}` set submit_on_creation=1, notify_by_email=1
where is_recurring=1'''.format(doctype))
frappe.db.sql('''update `tab{0}` set recurring_id = name
where is_recurring=1 and ifnull(recurring_id, '') = "" '''.format(doctype))
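# A minimal usage note (assuming the standard Frappe patch mechanism):
# patches like this one run once per site during migration, e.g.
#
#     bench --site example.local migrate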
|
fyfcauc/android_external_chromium-org
|
refs/heads/du44
|
tools/telemetry/telemetry/page/actions/interact.py
|
23
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page.actions import page_action
class InteractAction(page_action.PageAction):
def __init__(self, attributes=None):
super(InteractAction, self).__init__(attributes)
def RunAction(self, page, tab, previous_action):
tab.ExecuteJavaScript(
'console.time("' + self.TimelineMarkerLabel() + '")')
raw_input("Interacting... Press Enter to continue.")
tab.ExecuteJavaScript(
'console.timeEnd("' + self.TimelineMarkerLabel() + '")')
def TimelineMarkerLabel(self):
return 'InteractAction::RunAction'
|
JioCloud/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/routers/__init__.py
|
12133432
| |
mailhexu/pyDFTutils
|
refs/heads/master
|
pyDFTutils/perovskite/__init__.py
|
12133432
| |
valentin-krasontovitsch/ansible
|
refs/heads/devel
|
lib/ansible/modules/remote_management/ucs/__init__.py
|
12133432
| |
moonboots/tensorflow
|
refs/heads/master
|
tensorflow/g3doc/how_tos/__init__.py
|
12133432
| |
saurabh6790/medsyn-lib1
|
refs/heads/master
|
__init__.py
|
12133432
| |
xhqu1981/pymatgen
|
refs/heads/master
|
pymatgen/analysis/elasticity/elastic.py
|
2
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals
from __future__ import absolute_import
from pymatgen.analysis.elasticity.tensors import Tensor, \
voigt_map as vmap, TensorCollection
from pymatgen.analysis.elasticity.stress import Stress
from pymatgen.analysis.elasticity.strain import Strain
from scipy.misc import factorial
from collections import OrderedDict
import numpy as np
import warnings
import itertools
import string
import sympy as sp
"""
This module provides a class used to describe the elastic tensor,
including methods used to fit the elastic tensor from linear response
stress-strain data
"""
__author__ = "Maarten de Jong, Joseph Montoya"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = ("Ian Winter, Shyam Dwaraknath, "
"Mark Asta, Anubhav Jain")
__version__ = "1.0"
__maintainer__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
__status__ = "Development"
__date__ = "March 22, 2012"
class NthOrderElasticTensor(Tensor):
"""
An object representing an nth-order tensor expansion
of the stress-strain constitutive equations
"""
def __new__(cls, input_array, check_rank=None, tol=1e-4):
obj = super(NthOrderElasticTensor, cls).__new__(
cls, input_array, check_rank=check_rank)
if obj.rank % 2 != 0:
raise ValueError("ElasticTensor must have even rank")
if not obj.is_voigt_symmetric(tol):
warnings.warn("Input elastic tensor does not satisfy "
"standard voigt symmetries")
return obj.view(cls)
@property
def order(self):
"""
Order of the elastic tensor
"""
return self.rank // 2
def calculate_stress(self, strain):
"""
Calculates a given elastic tensor's contribution to the
stress using Einstein summation
Args:
strain (3x3 array-like): matrix corresponding to strain
"""
strain = np.array(strain)
if strain.shape == (6,):
strain = Strain.from_voigt(strain)
assert strain.shape == (3, 3), "Strain must be 3x3 or voigt-notation"
lc = string.ascii_lowercase[:self.rank-2]
lc_pairs = map(''.join, zip(*[iter(lc)]*2))
einsum_string = "ij" + lc + ',' + ','.join(lc_pairs) + "->ij"
einsum_args = [self] + [strain] * (self.order - 1)
stress_matrix = np.einsum(einsum_string, *einsum_args) \
/ factorial(self.order - 1)
return Stress(stress_matrix)
def energy_density(self, strain, convert_GPa_to_eV=True):
"""
Calculates the elastic energy density due to a strain
"""
e_density = np.sum(self.calculate_stress(strain)*strain) / self.order
if convert_GPa_to_eV:
e_density *= 0.000624151 # Conversion factor for GPa to eV/A^3
return e_density
@classmethod
def from_diff_fit(cls, strains, stresses, eq_stress=None,
order=2, tol=1e-10):
return cls(diff_fit(strains, stresses, eq_stress, order, tol)[order-2])
class ElasticTensor(NthOrderElasticTensor):
"""
This class extends Tensor to describe the 3x3x3x3
second-order elastic tensor, C_{ijkl}, with various
methods for estimating other properties derived from
the second order elastic tensor
"""
def __new__(cls, input_array, tol=1e-4):
"""
Create an ElasticTensor object. The constructor throws an error if
the shape of the input_matrix argument is not 3x3x3x3, i. e. in true
tensor notation. Issues a warning if the input_matrix argument does
not satisfy standard symmetries. Note that the constructor uses
__new__ rather than __init__ according to the standard method of
subclassing numpy ndarrays.
Args:
input_array (3x3x3x3 array-like): the 3x3x3x3 array-like
representing the elastic tensor
tol (float): tolerance for initial symmetry test of tensor
"""
obj = super(ElasticTensor, cls).__new__(cls, input_array,
check_rank=4, tol=tol)
return obj.view(cls)
@property
def compliance_tensor(self):
"""
returns the Voigt-notation compliance tensor,
which is the matrix inverse of the
Voigt-notation elastic tensor
"""
return np.linalg.inv(self.voigt)
@property
def k_voigt(self):
"""
returns the K_v bulk modulus
"""
return self.voigt[:3, :3].mean()
@property
def g_voigt(self):
"""
returns the G_v shear modulus
"""
return (2. * self.voigt[:3, :3].trace() -
np.triu(self.voigt[:3, :3]).sum() +
3 * self.voigt[3:, 3:].trace()) / 15.
@property
def k_reuss(self):
"""
returns the K_r bulk modulus
"""
return 1. / self.compliance_tensor[:3, :3].sum()
@property
def g_reuss(self):
"""
returns the G_r shear modulus
"""
return 15. / (8. * self.compliance_tensor[:3, :3].trace() -
4. * np.triu(self.compliance_tensor[:3, :3]).sum() +
3. * self.compliance_tensor[3:, 3:].trace())
@property
def k_vrh(self):
"""
returns the K_vrh (Voigt-Reuss-Hill) average bulk modulus
"""
return 0.5 * (self.k_voigt + self.k_reuss)
@property
def g_vrh(self):
"""
returns the G_vrh (Voigt-Reuss-Hill) average shear modulus
"""
return 0.5 * (self.g_voigt + self.g_reuss)
@property
def y_mod(self):
"""
Calculates Young's modulus (in SI units) using the Voigt-Reuss-Hill
averages of bulk and shear moduli
"""
return 9.e9 * self.k_vrh * self.g_vrh / (3. * self.k_vrh + self.g_vrh)
def trans_v(self, structure):
"""
Calculates transverse sound velocity (in SI units) using the
Voigt-Reuss-Hill average bulk modulus
Args:
structure: pymatgen structure object
Returns: transverse sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
weight = structure.composition.weight
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return (1e9 * self.g_vrh / mass_density) ** 0.5
def long_v(self, structure):
"""
Calculates longitudinal sound velocity (in SI units)
using the Voigt-Reuss-Hill average bulk modulus
Args:
structure: pymatgen structure object
Returns: longitudinal sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
weight = structure.composition.weight
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return (1e9 * (self.k_vrh + 4./3. * self.g_vrh) / mass_density) ** 0.5
def snyder_ac(self, structure):
"""
Calculates Snyder's acoustic sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's acoustic sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
num_density = 1e30 * nsites / volume
tot_mass = sum([e.atomic_mass for e in structure.species])
avg_mass = 1.6605e-27 * tot_mass / natoms
return 0.38483*avg_mass * \
((self.long_v(structure) + 2.*self.trans_v(structure))/3.) ** 3.\
/ (300.*num_density ** (-2./3.) * nsites ** (1./3.))
def snyder_opt(self, structure):
"""
Calculates Snyder's optical sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's optical sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return 1.66914e-23 * \
(self.long_v(structure) + 2.*self.trans_v(structure))/3. \
/ num_density ** (-2./3.) * (1 - nsites ** (-1./3.))
def snyder_total(self, structure):
"""
Calculates Snyder's total sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's total sound velocity (in SI units)
"""
return self.snyder_ac(structure) + self.snyder_opt(structure)
def clarke_thermalcond(self, structure):
"""
Calculates Clarke's thermal conductivity (in SI units)
Args:
structure: pymatgen structure object
Returns: Clarke's thermal conductivity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
tot_mass = sum([e.atomic_mass for e in structure.species])
natoms = structure.composition.num_atoms
weight = structure.composition.weight
avg_mass = 1.6605e-27 * tot_mass / natoms
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return 0.87 * 1.3806e-23 * avg_mass**(-2./3.) \
* mass_density**(1./6.) * self.y_mod**0.5
def cahill_thermalcond(self, structure):
"""
Calculates Cahill's thermal conductivity (in SI units)
Args:
structure: pymatgen structure object
Returns: Cahill's thermal conductivity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return 1.3806e-23 / 2.48 * num_density**(2./3.) \
* (self.long_v(structure) + 2 * self.trans_v(structure))
def debye_temperature(self, structure):
"""
Calculates the debye temperature (in SI units)
Args:
structure: pymatgen structure object
Returns: debye temperature (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
tot_mass = sum([e.atomic_mass for e in structure.species])
natoms = structure.composition.num_atoms
weight = structure.composition.weight
avg_mass = 1.6605e-27 * tot_mass / natoms
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return 2.589e-11 * avg_mass**(-1./3.) * mass_density**(-1./6.) \
* self.y_mod**0.5
def debye_temperature_gibbs(self, structure):
"""
Calculates the debye temperature according to the GIBBS
formulation (in SI units)
Args:
structure: pymatgen structure object
Returns: debye temperature (in SI units)
"""
volume = structure.volume
tot_mass = sum([e.atomic_mass for e in structure.species])
natoms = structure.composition.num_atoms
avg_mass = 1.6605e-27 * tot_mass / natoms
t = self.homogeneous_poisson
f = (3.*(2.*(2./3.*(1. + t)/(1. - 2.*t))**1.5 +
(1./3.*(1. + t)/(1. - t))**1.5)**-1) ** (1./3.)
return 2.9772e-11 * avg_mass**(-1./2.) * (volume / natoms) ** (-1./6.) \
* f * self.k_vrh ** 0.5
@property
def universal_anisotropy(self):
"""
returns the universal anisotropy value
"""
return 5. * self.g_voigt / self.g_reuss + \
self.k_voigt / self.k_reuss - 6.
@property
def homogeneous_poisson(self):
"""
returns the homogeneous poisson ratio
"""
return (1. - 2. / 3. * self.g_vrh / self.k_vrh) / \
(2. + 2. / 3. * self.g_vrh / self.k_vrh)
@property
def property_dict(self):
"""
returns a dictionary of properties derived from the elastic tensor
"""
props = ["k_voigt", "k_reuss", "k_vrh", "g_voigt", "g_reuss", "g_vrh",
"universal_anisotropy", "homogeneous_poisson", "y_mod"]
return {prop: getattr(self, prop) for prop in props}
def get_structure_property_dict(self, structure, include_base_props=True):
"""
returns a dictionary of properties derived from the elastic tensor
and an associated structure
"""
s_props = ["trans_v", "long_v", "snyder_ac", "snyder_opt",
"snyder_total", "clarke_thermalcond", "cahill_thermalcond",
"debye_temperature", "debye_temperature_gibbs"]
sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}
sp_dict["structure"] = structure
if include_base_props:
sp_dict.update(self.property_dict)
return sp_dict
@classmethod
def from_pseudoinverse(cls, strains, stresses):
"""
Class method to fit an elastic tensor from stress/strain
data. Method uses Moore-Penrose pseudoinverse to invert
the s = C*e equation with elastic tensor, stress, and
strain in voigt notation
Args:
stresses (Nx3x3 array-like): list or array of stresses
strains (Nx3x3 array-like): list or array of strains
"""
# convert the stress/strain to Nx6 arrays of voigt-notation
warnings.warn("Pseudoinverse fitting of Strain/Stress lists may yield "
"questionable results from vasp data, use with caution.")
stresses = np.array([Stress(stress).voigt for stress in stresses])
with warnings.catch_warnings(record=True):
strains = np.array([Strain(strain).voigt for strain in strains])
voigt_fit = np.transpose(np.dot(np.linalg.pinv(strains), stresses))
return cls.from_voigt(voigt_fit)
@classmethod
def from_stress_dict(cls, stress_dict, vasp=True):
"""
Constructs the elastic tensor from IndependentStrain-Stress dictionary
corresponding to legacy behavior of elasticity package.
Args:
stress_dict (dict): dictionary of stresses indexed by corresponding
IndependentStrain objects.
vasp (boolean): flag for whether the stress tensor should be
converted based on vasp units/convention for stress
"""
c_ij = np.zeros((6, 6))
for i, j in itertools.product(range(6), repeat=2):
strains = [s for s in stress_dict.keys()
if s.ij == vmap[i]]
xy = [(s[vmap[i]], stress_dict[s][vmap[j]]) for s in strains]
if len(xy) == 0:
raise ValueError("No ind. strains for vgt index {}".format(i))
elif len(xy) == 1:
xy += [(0, 0)] # Fit through 0
c_ij[i, j] = np.polyfit(*zip(*xy), deg=1)[0]
if vasp:
c_ij *= -0.1 # Convert units/sign convention of vasp stress tensor
c_ij[0:, 3:] = 0.5 * c_ij[0:, 3:] # for Voigt doubling of e4, e5, e6
c = cls.from_voigt(c_ij)
c = c.zeroed()
return c
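# A minimal usage sketch (the Voigt matrix is a hypothetical cubic example,
# in GPa): construct an ElasticTensor and read off derived moduli.
#
# c_voigt = [[165, 64, 64, 0, 0, 0],
#            [64, 165, 64, 0, 0, 0],
#            [64, 64, 165, 0, 0, 0],
#            [0, 0, 0, 80, 0, 0],
#            [0, 0, 0, 0, 80, 0],
#            [0, 0, 0, 0, 0, 80]]
# et = ElasticTensor.from_voigt(c_voigt)
# et.k_vrh, et.g_vrh # bulk and shear moduli, in GPa
# et.y_mod # Young's modulus, in Pa (note the 9.e9 factor above)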
class ElasticTensorExpansion(TensorCollection):
"""
This class is a sequence of elastic tensors corresponding
to an elastic tensor expansion, which can be used to
calculate stress and energy density and inherits all
of the list-based properties of TensorCollection
(e. g. symmetrization, voigt conversion, etc.)
"""
def __init__(self, c_list):
"""
Initialization method for ElasticTensorExpansion
Args:
c_list (list or tuple): sequence of Tensor inputs
or tensors from which the elastic tensor
expansion is constructed.
"""
c_list = [NthOrderElasticTensor(c, check_rank=4+i*2)
for i, c in enumerate(c_list)]
super(ElasticTensorExpansion, self).__init__(c_list)
@classmethod
def from_diff_fit(cls, strains, stresses, eq_stress=None,
tol=1e-10, order=3):
"""
Generates an elastic tensor expansion via the fitting function
defined below in diff_fit
"""
c_list = diff_fit(strains, stresses, eq_stress, order, tol)
return cls(c_list)
@property
def order(self):
"""
Order of the elastic tensor expansion, i. e. the order of the
highest included set of elastic constants
"""
return self[-1].order
def calculate_stress(self, strain):
"""
        Calculates the stress resulting from a strain by summing
        each tensor's contribution via Einstein summation
"""
return sum([c.calculate_stress(strain) for c in self])
def energy_density(self, strain, convert_GPa_to_eV=True):
"""
Calculates the elastic energy density due to a strain
"""
return sum([c.energy_density(strain, convert_GPa_to_eV)
for c in self])
def diff_fit(strains, stresses, eq_stress=None, order=2, tol=1e-10):
"""
nth order elastic constant fitting function based on
central-difference derivatives with respect to distinct
strain states. The algorithm is summarized as follows:
1. Identify distinct strain states as sets of indices
for which nonzero strain values exist, typically
[(0), (1), (2), (3), (4), (5), (0, 1) etc.]
2. For each strain state, find and sort strains and
stresses by strain value.
3. Find first, second .. nth derivatives of each stress
with respect to scalar variable corresponding to
the smallest perturbation in the strain.
4. Use the pseudoinverse of a matrix-vector expression
corresponding to the parameterized stress-strain
relationship and multiply that matrix by the respective
calculated first or second derivatives from the
previous step.
5. Place the calculated nth-order elastic
constants appropriately.
Args:
order (int): order of the elastic tensor set to return
strains (nx3x3 array-like): Array of 3x3 strains
to use in fitting of ECs
stresses (nx3x3 array-like): Array of 3x3 stresses
to use in fitting ECs. These should be PK2 stresses.
eq_stress (3x3 array-like): stress corresponding to
equilibrium strain (i. e. "0" strain state).
If not specified, function will try to find
the state in the list of provided stresses
and strains. If not found, defaults to 0.
tol (float): value for which strains below
are ignored in identifying strain states.
Returns:
Set of tensors corresponding to nth order expansion of
the stress/strain relation
"""
strain_state_dict = get_strain_state_dict(
strains, stresses, eq_stress=eq_stress, tol=tol,
add_eq=True, sort=True)
# Collect derivative data
c_list = []
dei_dsi = np.zeros((order - 1, 6, len(strain_state_dict)))
for n, (strain_state, data) in enumerate(strain_state_dict.items()):
hvec = data["strains"][:, strain_state.index(1)]
for i in range(1, order):
coef = get_diff_coeff(hvec, i)
dei_dsi[i-1, :, n] = np.dot(coef, data["stresses"])
m, absent = generate_pseudo(list(strain_state_dict.keys()), order)
for i in range(1, order):
cvec, carr = get_symbol_list(i+1)
svec = np.ravel(dei_dsi[i-1].T)
cmap = dict(zip(cvec, np.dot(m[i-1], svec)))
c_list.append(v_subs(carr, cmap))
return [Tensor.from_voigt(c) for c in c_list]
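# Usage sketch (hypothetical call; strains/stresses shaped as documented
# above): a second-order fit returns a single rank-4 tensor whose voigt
# form is the familiar 6x6 matrix of elastic constants.
#
#   c2, = diff_fit(strains, stresses, order=2)
#   c_voigt = c2.voigt  # 6x6 second-order elastic constants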
def find_eq_stress(strains, stresses, tol=1e-10):
"""
Finds stress corresponding to zero strain state in stress-strain list
Args:
strains (Nx3x3 array-like): array corresponding to strains
stresses (Nx3x3 array-like): array corresponding to stresses
tol (float): tolerance to find zero strain state
"""
stress_array = np.array(stresses)
strain_array = np.array(strains)
eq_stress = stress_array[np.all(abs(strain_array)<tol, axis=(1,2))]
if eq_stress.size != 0:
all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all()
if len(eq_stress) > 1 and not all_same:
raise ValueError("Multiple stresses found for equilibrium strain"
" state, please specify equilibrium stress or "
" remove extraneous stresses.")
eq_stress = eq_stress[0]
else:
warnings.warn("No eq state found, returning zero voigt stress")
eq_stress = Stress(np.zeros((3, 3)))
return eq_stress
def get_strain_state_dict(strains, stresses, eq_stress=None,
tol=1e-10, add_eq=True, sort=True):
"""
Creates a dictionary of voigt-notation stress-strain sets
    keyed by "strain state", i. e. a tuple of the non-zero
    entries expressed as ratios to the smallest nonzero value,
    e.g. [0, 0.1, 0, 0.2, 0, 0] -> (0,1,0,2,0,0)
    This allows strains to be collected into stencils so that
    parameterized finite difference derivatives can be evaluated
Args:
strains (Nx3x3 array-like): strain matrices
stresses (Nx3x3 array-like): stress matrices
        eq_stress (3x3 array-like): equilibrium stress
tol (float): tolerance for sorting strain states
add_eq (bool): flag for whether to add eq_strain
to stress-strain sets for each strain state
sort (bool): flag for whether to sort strain states
Returns:
OrderedDict with strain state keys and dictionaries
with stress-strain data corresponding to strain state
"""
# Recast stress/strains
vstrains = np.array([Strain(s).zeroed(tol).voigt for s in strains])
vstresses = np.array([Stress(s).zeroed(tol).voigt for s in stresses])
# Collect independent strain states:
independent = set([tuple(np.nonzero(vstrain)[0].tolist())
for vstrain in vstrains])
strain_state_dict = OrderedDict()
if add_eq:
if eq_stress is not None:
veq_stress = Stress(eq_stress).voigt
else:
veq_stress = find_eq_stress(strains, stresses).voigt
for n, ind in enumerate(independent):
# match strains with templates
template = np.zeros(6, dtype=bool)
np.put(template, ind, True)
template = np.tile(template, [vstresses.shape[0], 1])
mode = (template == (np.abs(vstrains) > 1e-10)).all(axis=1)
mstresses = vstresses[mode]
mstrains = vstrains[mode]
if add_eq:
# add zero strain state
mstrains = np.vstack([mstrains, np.zeros(6)])
mstresses = np.vstack([mstresses, veq_stress])
# sort strains/stresses by strain values
if sort:
mstresses = mstresses[mstrains[:, ind[0]].argsort()]
mstrains = mstrains[mstrains[:, ind[0]].argsort()]
# Get "strain state", i.e. ratio of each value to minimum strain
strain_state = mstrains[-1] / np.min(np.take(mstrains[-1], ind))
strain_state = tuple(strain_state)
strain_state_dict[strain_state] = {"strains": mstrains,
"stresses": mstresses}
return strain_state_dict
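def _demo_strain_state_key():
    # Minimal sketch (numpy only, synthetic vector; helper added for
    # demonstration): how a voigt strain maps to its "strain state" key
    # via the normalization used in get_strain_state_dict above.
    v = np.array([0.0, 0.1, 0.0, 0.2, 0.0, 0.0])
    ind = np.nonzero(v)[0]
    state = tuple(v / np.min(np.take(v, ind)))
    assert state == (0.0, 1.0, 0.0, 2.0, 0.0, 0.0)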
def generate_pseudo(strain_states, order=3):
"""
Generates the pseudoinverse for a given set of strains.
Args:
strain_states (6xN array like): a list of voigt-notation
"strain-states", i. e. perturbed indices of the strain
as a function of the smallest strain e. g. (0, 1, 0, 0, 1, 0)
order (int): order of pseudoinverse to calculate
Returns:
mis: pseudo inverses for each order tensor, these can
be multiplied by the central difference derivative
of the stress with respect to the strain state
absent_syms: symbols of the tensor absent from the PI
expression
"""
s = sp.Symbol('s')
nstates = len(strain_states)
ni = np.array(strain_states)*s
mis, absent_syms = [], []
for degree in range(2, order + 1):
cvec, carr = get_symbol_list(degree)
sarr = np.zeros((nstates, 6), dtype=object)
for n, strain_v in enumerate(ni):
# Get expressions
exps = carr.copy()
for i in range(degree - 1):
exps = np.dot(exps, strain_v)
            exps /= factorial(degree - 1)
sarr[n] = [sp.diff(exp, s, degree - 1) for exp in exps]
svec = sarr.ravel()
present_syms = set.union(*[exp.atoms(sp.Symbol) for exp in svec])
absent_syms += [set(cvec) - present_syms]
m = np.zeros((6*nstates, len(cvec)))
for n, c in enumerate(cvec):
m[:, n] = v_diff(svec, c)
mis.append(np.linalg.pinv(m))
return mis, absent_syms
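def _demo_pseudo_shapes():
    # Illustrative sanity check (assumes sympy is available; helper added
    # for demonstration): with the six uniaxial strain states and order=3,
    # the degree-2 block has 21 distinct symbols and the degree-3 block 56,
    # i.e. combinations with replacement of 6 voigt indices.
    states = [tuple(int(i == j) for i in range(6)) for j in range(6)]
    mis, absent = generate_pseudo(states, order=3)
    assert mis[0].shape == (21, 6 * 6) and mis[1].shape == (56, 6 * 6)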
def get_symbol_list(rank, dim=6):
"""
Returns a symbolic representation of the voigt-notation
tensor that places identical symbols for entries related
by index transposition, i. e. C_1121 = C_1211 etc.
Args:
dim (int): dimension of matrix/tensor, e. g. 6 for
voigt notation and 3 for standard
rank (int): rank of tensor, e. g. 3 for third-order ECs
Returns:
c_vec (array): array representing distinct indices
c_arr (array): array representing tensor with equivalent
indices assigned as above
"""
indices = list(
itertools.combinations_with_replacement(range(dim), r=rank))
c_vec = np.zeros(len(indices), dtype=object)
c_arr = np.zeros([dim]*rank, dtype=object)
for n, idx in enumerate(indices):
c_vec[n] = sp.Symbol('c_'+''.join([str(i) for i in idx]))
for perm in itertools.permutations(idx):
c_arr[perm] = c_vec[n]
return c_vec, c_arr
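def _demo_symbol_count():
    # Illustrative check (helper added for demonstration): rank 2 in voigt
    # dimension 6 yields C(6 + 2 - 1, 2) = 21 distinct symbols arranged in
    # a symmetric 6x6 layout.
    c_vec, c_arr = get_symbol_list(2)
    assert len(c_vec) == 21 and c_arr.shape == (6, 6)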
def subs(entry, cmap):
"""
Sympy substitution function, primarily for the purposes
of numpy vectorization
Args:
entry (symbol or exp): sympy expr to undergo subs
cmap (dict): map for symbols to values to use in subs
Returns:
Evaluated expression with substitution
"""
return entry.subs(cmap)
# Vectorized functions
v_subs = np.vectorize(subs)
v_diff = np.vectorize(sp.diff)
def get_diff_coeff(hvec, n=1):
"""
    Helper function to find the finite difference coefficients of a
    derivative on an arbitrary mesh.
Args:
hvec (1D array-like): sampling stencil
n (int): degree of derivative to find
"""
    hvec = np.array(hvec, dtype=float)
acc = len(hvec)
exp = np.column_stack([np.arange(acc)]*acc)
a = np.vstack([hvec] * acc) ** exp
b = np.zeros(acc)
b[n] = factorial(n)
return np.linalg.solve(a, b)
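if __name__ == "__main__":
    # Illustrative sketch (numpy only, synthetic data; not part of the
    # original module). First, pseudoinverse fitting of s = C*e in voigt
    # notation, mirroring ElasticTensor.from_pseudoinverse; second, the
    # classic central-difference stencil recovered by get_diff_coeff.
    rng = np.random.RandomState(0)
    c_true = rng.rand(6, 6)
    strains_v = rng.rand(20, 6)            # 20 synthetic voigt strains
    stresses_v = strains_v.dot(c_true.T)   # exact linear response s = C*e
    c_fit = np.transpose(np.dot(np.linalg.pinv(strains_v), stresses_v))
    assert np.allclose(c_fit, c_true)
    assert np.allclose(get_diff_coeff([-1.0, 0.0, 1.0], n=1),
                       [-0.5, 0.0, 0.5])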
|
darkleons/odoo
|
refs/heads/master
|
addons/survey/wizard/survey_email_compose_message.py
|
106
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.translate import _
from datetime import datetime
import re
import uuid
import urlparse
emails_split = re.compile(r"[;,\n\r]+")
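# Illustrative note: the pattern above tokenizes a pasted recipient list on
# commas, semicolons and newlines; surrounding whitespace survives the split
# and is stripped later by the callers below, e.g.
#   emails_split.split("a@x.com, b@y.com;c@z.com")
#   -> ['a@x.com', ' b@y.com', 'c@z.com']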
class survey_mail_compose_message(osv.TransientModel):
_name = 'survey.mail.compose.message'
_inherit = 'mail.compose.message'
_description = 'Email composition wizard for Survey'
_log_access = True
def _get_public_url(self, cr, uid, ids, name, arg, context=None):
res = dict((id, 0) for id in ids)
survey_obj = self.pool.get('survey.survey')
for wizard in self.browse(cr, uid, ids, context=context):
res[wizard.id] = survey_obj.browse(cr, uid, wizard.survey_id, context=context).public_url
return res
def _get_public_url_html(self, cr, uid, ids, name, arg, context=None):
""" Compute if the message is unread by the current user """
urls = self._get_public_url(cr, uid, ids, name, arg, context=context)
for key, url in urls.items():
urls[key] = '<a href="%s">%s</a>' % (url, _("Click here to start survey"))
return urls
_columns = {
'survey_id': fields.many2one('survey.survey', 'Survey', required=True),
'public': fields.selection([('public_link', 'Share the public web link to your audience.'),
('email_public_link', 'Send by email the public web link to your audience.'),
('email_private', 'Send private invitation to your audience (only one response per recipient and per invitation).')],
string='Share options', required=True),
'public_url': fields.function(_get_public_url, string="Public url", type="char"),
'public_url_html': fields.function(_get_public_url_html, string="Public HTML web link", type="char"),
'partner_ids': fields.many2many('res.partner',
'survey_mail_compose_message_res_partner_rel',
'wizard_id', 'partner_id', 'Existing contacts'),
'attachment_ids': fields.many2many('ir.attachment',
'survey_mail_compose_message_ir_attachments_rel',
'wizard_id', 'attachment_id', 'Attachments'),
        'multi_email': fields.text(string='List of emails', help="This list of recipient email addresses will not be converted into contacts. Separate addresses with commas, semicolons or newlines."),
'date_deadline': fields.date(string="Deadline to which the invitation to respond is valid", help="Deadline to which the invitation to respond for this survey is valid. If the field is empty, the invitation is still valid."),
}
_defaults = {
'public': 'public_link',
'survey_id': lambda self, cr, uid, ctx={}: ctx.get('model') == 'survey.survey' and ctx.get('res_id') or None
}
    def default_get(self, cr, uid, fields, context=None):
        if context is None:
            context = {}
        res = super(survey_mail_compose_message, self).default_get(cr, uid, fields, context=context)
        if context.get('active_model') == 'res.partner' and context.get('active_ids'):
res.update({'partner_ids': context.get('active_ids')})
return res
def onchange_multi_email(self, cr, uid, ids, multi_email, context=None):
emails = list(set(emails_split.split(multi_email or "")))
emails_checked = []
error_message = ""
for email in emails:
email = email.strip()
if email:
if not re.search(r"^[^@]+@[^@]+$", email):
error_message += "\n'%s'" % email
else:
emails_checked.append(email)
if error_message:
raise osv.except_osv(_('Warning!'), _("One email at least is incorrect: %s" % error_message))
emails_checked.sort()
values = {'multi_email': '\n'.join(emails_checked)}
return {'value': values}
def onchange_survey_id(self, cr, uid, ids, survey_id, context=None):
""" Compute if the message is unread by the current user. """
if survey_id:
survey = self.pool.get('survey.survey').browse(cr, uid, survey_id, context=context)
return {
'value': {
'subject': survey.title,
'public_url': survey.public_url,
'public_url_html': '<a href="%s">%s</a>' % (survey.public_url, _("Click here to take survey")),
}}
else:
txt = _("Please select a survey")
return {
'value': {
'public_url': txt,
'public_url_html': txt,
}}
#------------------------------------------------------
# Wizard validation and send
#------------------------------------------------------
def send_mail(self, cr, uid, ids, context=None):
""" Process the wizard content and proceed with sending the related
email(s), rendering any template patterns on the fly if needed """
if context is None:
context = {}
survey_response_obj = self.pool.get('survey.user_input')
partner_obj = self.pool.get('res.partner')
mail_mail_obj = self.pool.get('mail.mail')
try:
model, anonymous_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'portal', 'group_anonymous')
except ValueError:
anonymous_id = None
def create_response_and_send_mail(wizard, token, partner_id, email):
""" Create one mail by recipients and replace __URL__ by link with identification token """
#set url
url = wizard.survey_id.public_url
url = urlparse.urlparse(url).path[1:] # dirty hack to avoid incorrect urls
if token:
url = url + '/' + token
# post the message
values = {
'model': None,
'res_id': None,
'subject': wizard.subject,
'body': wizard.body.replace("__URL__", url),
'body_html': wizard.body.replace("__URL__", url),
'parent_id': None,
'partner_ids': partner_id and [(4, partner_id)] or None,
'notified_partner_ids': partner_id and [(4, partner_id)] or None,
'attachment_ids': wizard.attachment_ids or None,
'email_from': wizard.email_from or None,
'email_to': email,
}
mail_id = mail_mail_obj.create(cr, uid, values, context=context)
mail_mail_obj.send(cr, uid, [mail_id], context=context)
def create_token(wizard, partner_id, email):
if context.get("survey_resent_token"):
response_ids = survey_response_obj.search(cr, uid, [('survey_id', '=', wizard.survey_id.id), ('state', 'in', ['new', 'skip']), '|', ('partner_id', '=', partner_id), ('email', '=', email)], context=context)
if response_ids:
return survey_response_obj.read(cr, uid, response_ids, ['token'], context=context)[0]['token']
if wizard.public != 'email_private':
return None
else:
token = uuid.uuid4().__str__()
# create response with token
survey_response_obj.create(cr, uid, {
'survey_id': wizard.survey_id.id,
'deadline': wizard.date_deadline,
'date_create': datetime.now(),
'type': 'link',
'state': 'new',
'token': token,
'partner_id': partner_id,
'email': email})
return token
for wizard in self.browse(cr, uid, ids, context=context):
# check if __URL__ is in the text
if wizard.body.find("__URL__") < 0:
raise osv.except_osv(_('Warning!'), _("The content of the text don't contain '__URL__'. \
__URL__ is automaticaly converted into the special url of the survey."))
if not wizard.multi_email and not wizard.partner_ids and (context.get('default_partner_ids') or context.get('default_multi_email')):
wizard.multi_email = context.get('default_multi_email')
wizard.partner_ids = context.get('default_partner_ids')
# quick check of email list
emails_list = []
if wizard.multi_email:
emails = list(set(emails_split.split(wizard.multi_email)) - set([partner.email for partner in wizard.partner_ids]))
for email in emails:
email = email.strip()
if re.search(r"^[^@]+@[^@]+$", email):
emails_list.append(email)
# remove public anonymous access
partner_list = []
for partner in wizard.partner_ids:
if not anonymous_id or not partner.user_ids or anonymous_id not in [x.id for x in partner.user_ids[0].groups_id]:
partner_list.append({'id': partner.id, 'email': partner.email})
if not len(emails_list) and not len(partner_list):
if wizard.model == 'res.partner' and wizard.res_id:
return False
raise osv.except_osv(_('Warning!'), _("Please enter at least one valid recipient."))
for email in emails_list:
partner_id = partner_obj.search(cr, uid, [('email', '=', email)], context=context)
partner_id = partner_id and partner_id[0] or None
token = create_token(wizard, partner_id, email)
create_response_and_send_mail(wizard, token, partner_id, email)
for partner in partner_list:
token = create_token(wizard, partner['id'], partner['email'])
create_response_and_send_mail(wizard, token, partner['id'], partner['email'])
return {'type': 'ir.actions.act_window_close'}
|
fidomason/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/subprocessdata/qgrep.py
|
241
|
"""When called with a single argument, simulated fgrep with a single
argument and no options."""
import sys
if __name__ == "__main__":
pattern = sys.argv[1]
for line in sys.stdin:
if pattern in line:
sys.stdout.write(line)
|
admetricks/phantomjs
|
refs/heads/master
|
src/breakpad/src/tools/gyp/test/variables/gyptest-commands.py
|
138
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<!()' syntax commands.
"""
import os
import TestGyp
test = TestGyp.TestGyp(format='gypd')
expect = test.read('commands.gyp.stdout')
# Set $HOME so that gyp doesn't read the user's actual
# ~/.gyp/include.gypi file, which may contain variables
# and other settings that would change the output.
os.environ['HOME'] = test.workpath()
test.run_gyp('commands.gyp',
'--debug', 'variables', '--debug', 'general',
stdout=expect)
# Verify the commands.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands.gypd').replace('\r\n', '\n')
expect = test.read('commands.gypd.golden')
if not test.match(contents, expect):
print "Unexpected contents of `commands.gypd'"
  test.diff(expect, contents, 'commands.gypd ')
test.fail_test()
test.pass_test()
|
elisabeth7777/Kebab-CafeCompta
|
refs/heads/master
|
web/global/plugins/jquery-file-upload/server/gae-python/main.py
|
242
|
# -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.1.1
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json
import re
import urllib
import webapp2
WEBSITE = 'http://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1 # bytes
MAX_FILE_SIZE = 5000000 # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80' # max width / height
EXPIRATION_TIME = 300 # seconds
def cleanup(blob_keys):
blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
def initialize(self, request, response):
super(UploadHandler, self).initialize(request, response)
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers[
'Access-Control-Allow-Methods'
] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
self.response.headers[
'Access-Control-Allow-Headers'
] = 'Content-Type, Content-Range, Content-Disposition'
def validate(self, file):
if file['size'] < MIN_FILE_SIZE:
file['error'] = 'File is too small'
elif file['size'] > MAX_FILE_SIZE:
file['error'] = 'File is too big'
elif not ACCEPT_FILE_TYPES.match(file['type']):
file['error'] = 'Filetype not allowed'
else:
return True
return False
def get_file_size(self, file):
file.seek(0, 2) # Seek to the end of the file
size = file.tell() # Get the position of EOF
file.seek(0) # Reset the file position to the beginning
return size
def write_blob(self, data, info):
blob = files.blobstore.create(
mime_type=info['type'],
_blobinfo_uploaded_filename=info['name']
)
with files.open(blob, 'a') as f:
f.write(data)
files.finalize(blob)
return files.blobstore.get_blob_key(blob)
def handle_upload(self):
results = []
blob_keys = []
for name, fieldStorage in self.request.POST.items():
if type(fieldStorage) is unicode:
continue
result = {}
result['name'] = re.sub(
r'^.*\\',
'',
fieldStorage.filename
)
result['type'] = fieldStorage.type
result['size'] = self.get_file_size(fieldStorage.file)
if self.validate(result):
blob_key = str(
self.write_blob(fieldStorage.value, result)
)
blob_keys.append(blob_key)
result['deleteType'] = 'DELETE'
result['deleteUrl'] = self.request.host_url +\
'/?key=' + urllib.quote(blob_key, '')
if (IMAGE_TYPES.match(result['type'])):
try:
result['url'] = images.get_serving_url(
blob_key,
secure_url=self.request.host_url.startswith(
'https'
)
)
result['thumbnailUrl'] = result['url'] +\
THUMBNAIL_MODIFICATOR
                except Exception:  # Could not get an image serving url
                    pass
                if 'url' not in result:
result['url'] = self.request.host_url +\
'/' + blob_key + '/' + urllib.quote(
result['name'].encode('utf-8'), '')
results.append(result)
deferred.defer(
cleanup,
blob_keys,
_countdown=EXPIRATION_TIME
)
return results
def options(self):
pass
def head(self):
pass
def get(self):
self.redirect(WEBSITE)
def post(self):
if (self.request.get('_method') == 'DELETE'):
return self.delete()
result = {'files': self.handle_upload()}
s = json.dumps(result, separators=(',', ':'))
redirect = self.request.get('redirect')
if redirect:
return self.redirect(str(
redirect.replace('%s', urllib.quote(s, ''), 1)
))
        if 'application/json' in self.request.headers.get('Accept', ''):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(s)
def delete(self):
key = self.request.get('key') or ''
blobstore.delete(key)
s = json.dumps({key: True}, separators=(',', ':'))
        if 'application/json' in self.request.headers.get('Accept', ''):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(s)
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, key, filename):
if not blobstore.get(key):
self.error(404)
else:
# Prevent browsers from MIME-sniffing the content-type:
self.response.headers['X-Content-Type-Options'] = 'nosniff'
# Cache for the expiration time:
self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME
# Send the file forcing a download dialog:
self.send_blob(key, save_as=filename, content_type='application/octet-stream')
app = webapp2.WSGIApplication(
[
('/', UploadHandler),
('/([^/]+)/([^/]+)', DownloadHandler)
],
debug=True
)
|
DarthMaulware/EquationGroupLeaks
|
refs/heads/master
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/status/cmd/uptime/tasking_dsz.py
|
1
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tasking_dsz.py
import mcl.framework
import mcl.tasking
class dsz:
INTERFACE = 16842801
PFAM = 4204
PROVIDER_ANY = 4204
PROVIDER = 16846956
RPC_INFO_QUERY = mcl.tasking.RpcInfo(mcl.framework.DSZ, [INTERFACE, PROVIDER_ANY, 0])
|
vimagick/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/lifenews.py
|
109
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
determine_ext,
int_or_none,
remove_end,
unified_strdate,
ExtractorError,
)
class LifeNewsIE(InfoExtractor):
IE_NAME = 'lifenews'
IE_DESC = 'LIFE | NEWS'
_VALID_URL = r'http://lifenews\.ru/(?:mobile/)?(?P<section>news|video)/(?P<id>\d+)'
_TESTS = [{
'url': 'http://lifenews.ru/news/126342',
'md5': 'e1b50a5c5fb98a6a544250f2e0db570a',
'info_dict': {
'id': '126342',
'ext': 'mp4',
'title': 'МВД разыскивает мужчин, оставивших в IKEA сумку с автоматом',
'description': 'Камеры наблюдения гипермаркета зафиксировали троих мужчин, спрятавших оружейный арсенал в камере хранения.',
'thumbnail': 're:http://.*\.jpg',
'upload_date': '20140130',
}
}, {
# video in <iframe>
'url': 'http://lifenews.ru/news/152125',
'md5': '77d19a6f0886cd76bdbf44b4d971a273',
'info_dict': {
'id': '152125',
'ext': 'mp4',
'title': 'В Сети появилось видео захвата «Правым сектором» колхозных полей ',
'description': 'Жители двух поселков Днепропетровской области не простили радикалам угрозу лишения плодородных земель и пошли в лобовую. ',
'upload_date': '20150402',
}
}, {
'url': 'http://lifenews.ru/news/153461',
'md5': '9b6ef8bc0ffa25aebc8bdb40d89ab795',
'info_dict': {
'id': '153461',
'ext': 'mp4',
'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве',
'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.',
'upload_date': '20150505',
}
}, {
'url': 'http://lifenews.ru/video/13035',
'only_matching': True,
}]
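    # Illustrative note: _VALID_URL captures both named groups, e.g.
    #   re.match(LifeNewsIE._VALID_URL,
    #            'http://lifenews.ru/news/126342').group('section', 'id')
    #   -> ('news', '126342')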
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
section = mobj.group('section')
webpage = self._download_webpage(
'http://lifenews.ru/%s/%s' % (section, video_id),
video_id, 'Downloading page')
videos = re.findall(r'<video.*?poster="(?P<poster>[^"]+)".*?src="(?P<video>[^"]+)".*?></video>', webpage)
iframe_link = self._html_search_regex(
'<iframe[^>]+src=["\']([^"\']+)["\']', webpage, 'iframe link', default=None)
if not videos and not iframe_link:
raise ExtractorError('No media links available for %s' % video_id)
title = remove_end(
self._og_search_title(webpage),
' - Первый по срочным новостям — LIFE | NEWS')
description = self._og_search_description(webpage)
view_count = self._html_search_regex(
r'<div class=\'views\'>\s*(\d+)\s*</div>', webpage, 'view count', fatal=False)
comment_count = self._html_search_regex(
r'=\'commentCount\'[^>]*>\s*(\d+)\s*<',
webpage, 'comment count', fatal=False)
upload_date = self._html_search_regex(
r'<time[^>]*datetime=\'([^\']+)\'', webpage, 'upload date', fatal=False)
if upload_date is not None:
upload_date = unified_strdate(upload_date)
common_info = {
'description': description,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
'upload_date': upload_date,
}
def make_entry(video_id, media, video_number=None):
cur_info = dict(common_info)
cur_info.update({
'id': video_id,
'url': media[1],
'thumbnail': media[0],
'title': title if video_number is None else '%s-video%s' % (title, video_number),
})
return cur_info
if iframe_link:
iframe_link = self._proto_relative_url(iframe_link, 'http:')
cur_info = dict(common_info)
cur_info.update({
'_type': 'url_transparent',
'id': video_id,
'title': title,
'url': iframe_link,
})
return cur_info
if len(videos) == 1:
return make_entry(video_id, videos[0])
else:
return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]
class LifeEmbedIE(InfoExtractor):
IE_NAME = 'life:embed'
_VALID_URL = r'http://embed\.life\.ru/embed/(?P<id>[\da-f]{32})'
_TEST = {
'url': 'http://embed.life.ru/embed/e50c2dec2867350528e2574c899b8291',
'md5': 'b889715c9e49cb1981281d0e5458fbbe',
'info_dict': {
'id': 'e50c2dec2867350528e2574c899b8291',
'ext': 'mp4',
'title': 'e50c2dec2867350528e2574c899b8291',
'thumbnail': 're:http://.*\.jpg',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = []
for video_url in re.findall(r'"file"\s*:\s*"([^"]+)', webpage):
video_url = compat_urlparse.urljoin(url, video_url)
ext = determine_ext(video_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id='m3u8'))
else:
formats.append({
'url': video_url,
'format_id': ext,
'preference': 1,
})
self._sort_formats(formats)
thumbnail = self._search_regex(
r'"image"\s*:\s*"([^"]+)', webpage, 'thumbnail', default=None)
return {
'id': video_id,
'title': video_id,
'thumbnail': thumbnail,
'formats': formats,
}
|
davidvon/pipa-pay-server
|
refs/heads/master
|
site-packages/whoosh/matching/combo.py
|
92
|
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
from array import array
from whoosh.compat import xrange
from whoosh.matching import mcore
class CombinationMatcher(mcore.Matcher):
def __init__(self, submatchers, boost=1.0):
self._submatchers = submatchers
self._boost = boost
def supports_block_quality(self):
return all(m.supports_block_quality() for m in self._submatchers)
def max_quality(self):
return max(m.max_quality() for m in self._submatchers
if m.is_active()) * self._boost
def supports(self, astype):
return all(m.supports(astype) for m in self._submatchers)
def children(self):
return iter(self._submatchers)
def score(self):
return sum(m.score() for m in self._submatchers) * self._boost
class PreloadedUnionMatcher(CombinationMatcher):
"""Instead of marching the sub-matchers along in parallel, this
matcher pre-reads the scores for EVERY MATCHING DOCUMENT, trading memory
for speed.
This is faster than the implementation using a binary tree of
:class:`~whoosh.matching.binary.UnionMatcher` objects (possibly just
because of less overhead), but it doesn't allow getting information about
the "current" document other than the score, because there isn't really a
current document, just an array of scores.
"""
def __init__(self, submatchers, doccount, boost=1.0, scored=True):
CombinationMatcher.__init__(self, submatchers, boost=boost)
self._doccount = doccount
a = array("d")
active = [subm for subm in self._submatchers if subm.is_active()]
if active:
offset = self._docnum = min(m.id() for m in active)
for m in active:
while m.is_active():
if scored:
score = m.score() * boost
else:
score = boost
docnum = m.id()
place = docnum - offset
if len(a) <= place:
a.extend(0 for _ in xrange(place - len(a) + 1))
a[place] += score
m.next()
self._a = a
self._offset = offset
else:
self._docnum = 0
self._offset = 0
self._a = a
def is_active(self):
return self._docnum - self._offset < len(self._a)
def id(self):
return self._docnum
def score(self):
return self._a[self._docnum - self._offset]
def next(self):
a = self._a
offset = self._offset
place = self._docnum - offset
place += 1
while place < len(a) and a[place] == 0:
place += 1
self._docnum = place + offset
def max_quality(self):
return max(self._a[self._docnum - self._offset:])
def block_quality(self):
return self.max_quality()
def skip_to(self, docnum):
if docnum < self._docnum:
return
self._docnum = docnum
i = docnum - self._offset
if i < len(self._a) and self._a[i] == 0:
self.next()
def skip_to_quality(self, minquality):
a = self._a
offset = self._offset
place = self._docnum - offset
skipped = 0
while place < len(a) and a[place] <= minquality:
place += 1
skipped = 1
self._docnum = place + offset
return skipped
def supports(self, astype):
# This matcher doesn't support any posting values
return False
def all_ids(self):
a = self._a
offset = self._offset
place = self._docnum - offset
while place < len(a):
if a[place] > 0:
yield place + offset
place += 1
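def _demo_preloaded_scores(postings, boost=1.0):
    """Illustrative sketch (plain data, hypothetical input; helper added
    for demonstration): the score pre-accumulation used by
    PreloadedUnionMatcher. `postings` is a list of {docnum: score} dicts,
    one per submatcher; all scores land in a single array indexed by
    (docnum - offset)."""
    offset = min(min(p) for p in postings if p)
    top = max(max(p) for p in postings if p)
    a = array("d", (0 for _ in xrange(top - offset + 1)))
    for p in postings:
        for docnum, score in p.items():
            a[docnum - offset] += score * boost
    return offset, a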
class ArrayUnionMatcher(CombinationMatcher):
"""Instead of marching the sub-matchers along in parallel, this matcher
pre-reads the scores for a large block of documents at a time from each
matcher, accumulating the scores in an array.
This is faster than the implementation using a binary tree of
:class:`~whoosh.matching.binary.UnionMatcher` objects (possibly just
because of less overhead), but it doesn't allow getting information about
the "current" document other than the score, because there isn't really a
current document, just an array of scores.
"""
def __init__(self, submatchers, doccount, boost=1.0, scored=True,
partsize=2048):
CombinationMatcher.__init__(self, submatchers, boost=boost)
self._scored = scored
self._doccount = doccount
if not partsize:
partsize = doccount
self._partsize = partsize
self._a = array("d", (0 for _ in xrange(self._partsize)))
self._docnum = self._min_id()
self._read_part()
def __repr__(self):
return ("%s(%r, boost=%f, scored=%r, partsize=%d)"
% (self.__class__.__name__, self._submatchers, self._boost,
self._scored, self._partsize))
def _min_id(self):
active = [subm for subm in self._submatchers if subm.is_active()]
if active:
return min(subm.id() for subm in active)
else:
return self._doccount
def _read_part(self):
scored = self._scored
boost = self._boost
limit = min(self._docnum + self._partsize, self._doccount)
offset = self._docnum
a = self._a
# Clear the array
for i in xrange(self._partsize):
a[i] = 0
# Add the scores from the submatchers into the array
for m in self._submatchers:
while m.is_active() and m.id() < limit:
i = m.id() - offset
if scored:
a[i] += m.score() * boost
else:
a[i] = 1
m.next()
self._offset = offset
self._limit = limit
def _find_next(self):
a = self._a
docnum = self._docnum
offset = self._offset
limit = self._limit
while docnum < limit:
if a[docnum - offset] > 0:
break
docnum += 1
if docnum == limit:
self._docnum = self._min_id()
self._read_part()
else:
self._docnum = docnum
def supports(self, astype):
# This matcher doesn't support any posting values
return False
def is_active(self):
return self._docnum < self._doccount
def max_quality(self):
return max(m.max_quality() for m in self._submatchers)
def block_quality(self):
return max(self._a)
def skip_to(self, docnum):
if docnum < self._offset:
# We've already passed it
return
elif docnum < self._limit:
# It's in the current part
self._docnum = docnum
self._find_next()
return
# Advance all active submatchers
submatchers = self._submatchers
active = False
for subm in submatchers:
if subm.is_active():
subm.skip_to(docnum)
if any(subm.is_active() for subm in submatchers):
# Rebuffer
self._docnum = self._min_id()
self._read_part()
else:
self._docnum = self._doccount
def skip_to_quality(self, minquality):
skipped = 0
while self.is_active() and self.block_quality() <= minquality:
skipped += 1
self._docnum = self._limit
self._read_part()
if self.is_active():
self._find_next()
return skipped
def id(self):
return self._docnum
def all_ids(self):
doccount = self._doccount
docnum = self._docnum
offset = self._offset
limit = self._limit
a = self._a
while docnum < doccount:
if a[docnum - offset] > 0:
yield docnum
docnum += 1
if docnum == limit:
self._docnum = docnum
self._read_part()
offset = self._offset
limit = self._limit
def next(self):
self._docnum += 1
return self._find_next()
def score(self):
return self._a[self._docnum - self._offset]
|
stegua/dotlib
|
refs/heads/master
|
python/view_matrix.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 13 18:24:13 2017
@author: gualandi
"""
import matplotlib.pyplot as plt
import numpy as np
def DrawMatrix(M, name="prova.pdf"):
    # Create a figure and draw the image given by the matrix
    plt.figure(figsize=(6, 6))
    # Use the 'gray' colormap to render the board in black & white
    plt.imshow(M, cmap='gray')
    plt.savefig(name, dpi=300)
    plt.show()
#------------------------------------------
# MAIN ENTRY POINT
#------------------------------------------
if __name__ == "__main__":
    # Raw strings keep the Windows backslashes from being read as escapes
    filename1 = r'D:\Ricerca\DOTA\data\DOTmark_1.0\Data\ClassicImages\data32_1001.csv'
    M1 = np.loadtxt(open(filename1, "rb"), delimiter=",")
    DrawMatrix(M1)
    filename2 = r'D:\Ricerca\DOTA\data\DOTmark_1.0\Data\ClassicImages\data64_1001.csv'
    M2 = np.loadtxt(open(filename2, "rb"), delimiter=",")
    DrawMatrix(M2)
    filename2 = r'D:\Ricerca\DOTA\data\DOTmark_1.0\Data\ClassicImages\data128_1001.csv'
    M2 = np.loadtxt(open(filename2, "rb"), delimiter=",")
    DrawMatrix(M2)
    filename2 = r'D:\Ricerca\DOTA\data\DOTmark_1.0\Data\ClassicImages\data256_1001.csv'
    M2 = np.loadtxt(open(filename2, "rb"), delimiter=",")
    DrawMatrix(M2)
    filename2 = r'D:\Ricerca\DOTA\data\DOTmark_1.0\Data\ClassicImages\data512_1001.csv'
    M2 = np.loadtxt(open(filename2, "rb"), delimiter=",")
    DrawMatrix(M2)
|
ZLLab-Mooc/edx-platform
|
refs/heads/named-release/dogwood.rc
|
cms/djangoapps/contentstore/features/course-outline.py
|
19
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from common import *
from nose.tools import assert_true, assert_false
from logging import getLogger
logger = getLogger(__name__)
@step(u'I have a course with no sections$')
def have_a_course(step):
world.clear_courses()
course = world.CourseFactory.create()
@step(u'I have a course with 1 section$')
def have_a_course_with_1_section(step):
world.clear_courses()
course = world.CourseFactory.create()
section = world.ItemFactory.create(parent_location=course.location)
subsection1 = world.ItemFactory.create(
parent_location=section.location,
category='sequential',
display_name='Subsection One',)
@step(u'I have a course with multiple sections$')
def have_a_course_with_two_sections(step):
world.clear_courses()
course = world.CourseFactory.create()
section = world.ItemFactory.create(parent_location=course.location)
subsection1 = world.ItemFactory.create(
parent_location=section.location,
category='sequential',
display_name='Subsection One',)
section2 = world.ItemFactory.create(
parent_location=course.location,
display_name='Section Two',)
subsection2 = world.ItemFactory.create(
parent_location=section2.location,
category='sequential',
display_name='Subsection Alpha',)
subsection3 = world.ItemFactory.create(
parent_location=section2.location,
category='sequential',
display_name='Subsection Beta',)
@step(u'I navigate to the course outline page$')
def navigate_to_the_course_outline_page(step):
create_studio_user(is_staff=True)
log_into_studio()
course_locator = 'a.course-link'
world.css_click(course_locator)
@step(u'I navigate to the outline page of a course with multiple sections')
def nav_to_the_outline_page_of_a_course_with_multiple_sections(step):
step.given('I have a course with multiple sections')
step.given('I navigate to the course outline page')
@step(u'I add a section')
def i_add_a_section(step):
add_section()
@step(u'I press the section delete icon')
def i_press_the_section_delete_icon(step):
delete_locator = 'section .outline-section > .section-header a.delete-button'
world.css_click(delete_locator)
@step(u'I will confirm all alerts')
def i_confirm_all_alerts(step):
confirm_locator = '.prompt .nav-actions button.action-primary'
world.css_click(confirm_locator)
@step(u'I see the "([^"]*) All Sections" link$')
def i_see_the_collapse_expand_all_span(step, text):
if text == "Collapse":
span_locator = '.button-toggle-expand-collapse .collapse-all .label'
elif text == "Expand":
span_locator = '.button-toggle-expand-collapse .expand-all .label'
assert_true(world.css_visible(span_locator))
@step(u'I do not see the "([^"]*) All Sections" link$')
def i_do_not_see_the_collapse_expand_all_span(step, text):
if text == "Collapse":
span_locator = '.button-toggle-expand-collapse .collapse-all .label'
elif text == "Expand":
span_locator = '.button-toggle-expand-collapse .expand-all .label'
assert_false(world.css_visible(span_locator))
@step(u'I click the "([^"]*) All Sections" link$')
def i_click_the_collapse_expand_all_span(step, text):
if text == "Collapse":
span_locator = '.button-toggle-expand-collapse .collapse-all .label'
elif text == "Expand":
span_locator = '.button-toggle-expand-collapse .expand-all .label'
assert_true(world.browser.is_element_present_by_css(span_locator))
world.css_click(span_locator)
@step(u'I ([^"]*) the first section$')
def i_collapse_expand_a_section(step, text):
if text == "collapse":
locator = 'section .outline-section .ui-toggle-expansion'
elif text == "expand":
locator = 'section .outline-section .ui-toggle-expansion'
world.css_click(locator)
@step(u'all sections are ([^"]*)$')
def all_sections_are_collapsed_or_expanded(step, text):
subsection_locator = 'div.subsection-list'
subsections = world.css_find(subsection_locator)
for index in range(len(subsections)):
if text == "collapsed":
assert_false(world.css_visible(subsection_locator, index=index))
elif text == "expanded":
assert_true(world.css_visible(subsection_locator, index=index))
@step(u"I change an assignment's grading status")
def change_grading_status(step):
world.css_find('a.menu-toggle').click()
world.css_find('.menu li').first.click()
|
BertrandBordage/django-tree
|
refs/heads/master
|
tests/settings.py
|
1
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tree',
'USER': 'tree',
},
}
INSTALLED_APPS = (
'tree',
'tests',
)
SECRET_KEY = 'not important here'
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_01_01/operations/_packet_captures_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations(object):
"""PacketCapturesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
parameters, # type: "_models.PacketCapture"
**kwargs # type: Any
):
# type: (...) -> "_models.PacketCaptureResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PacketCapture')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
parameters, # type: "_models.PacketCapture"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PacketCaptureResult"]
"""Create and start a packet capture on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param parameters: Parameters that define the create packet capture operation.
:type parameters: ~azure.mgmt.network.v2018_01_01.models.PacketCapture
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PacketCaptureResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.PacketCaptureResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
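    # Hypothetical usage sketch (client construction and all names below are
    # assumptions, not part of this generated module): driving the LROPoller
    # returned by begin_create.
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network import NetworkManagementClient
    #
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription id>")
    #   poller = client.packet_captures.begin_create(
    #       "my-rg", "my-watcher", "my-capture", parameters)
    #   result = poller.result()  # blocks until the capture session exists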
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PacketCaptureResult"
"""Gets a packet capture session by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PacketCaptureResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_01_01.models.PacketCaptureResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stops a specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
def _get_status_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PacketCaptureQueryStatusResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
# Construct URL
url = self._get_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
def begin_get_status(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PacketCaptureQueryStatusResult"]
"""Query the status of a running packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param packet_capture_name: The name given to the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.PacketCaptureQueryStatusResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PacketCaptureListResult"]
"""Lists all packet capture sessions within the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either PacketCaptureListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.PacketCaptureListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'} # type: ignore
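# --- Usage sketch (illustrative; not part of the generated client) ---
# Assuming azure-identity is installed and the resource names below are
# placeholders, the long-running operations above are normally driven through
# the NetworkManagementClient facade rather than called directly:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   # begin_* methods return an LROPoller; .result() blocks until completion.
#   client.packet_captures.begin_stop("my-rg", "my-watcher", "my-capture").result()
#   status = client.packet_captures.begin_get_status(
#       "my-rg", "my-watcher", "my-capture").result()
#   # list() returns an ItemPaged iterator of PacketCaptureResult objects.
#   for capture in client.packet_captures.list("my-rg", "my-watcher"):
#       print(capture.name)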
|
ansible/ansible-modules-extras
|
refs/heads/devel
|
web_infrastructure/apache2_mod_proxy.py
|
23
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: apache2_mod_proxy
version_added: "2.2"
short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
description:
- Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
status page has to be enabled and accessible, as this module relies on parsing
this page. This module supports Ansible check_mode, and requires the
BeautifulSoup python module.
options:
balancer_url_suffix:
default: /balancer-manager/
description:
- Suffix of the balancer pool url required to access the balancer pool
status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
required: false
balancer_vhost:
default: None
description:
- (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
required: true
member_host:
default: None
description:
- (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
Port number is autodetected and should not be specified here.
If undefined, apache2_mod_proxy module will return a members list of
dictionaries of all the current balancer pool members' attributes.
required: false
state:
default: None
description:
- Desired state of the member host.
The states (absent|disabled), drained, hot_standby and ignore_errors can be
invoked simultaneously by separating them with a comma (e.g. state=drained,ignore_errors).
required: false
choices: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]
tls:
default: false
description:
- Use https to access balancer management page.
choices: ["true", "false"]
validate_certs:
default: true
description:
- Validate ssl/tls certificates.
choices: ["true", "false"]
'''
EXAMPLES = '''
# Get all current balancer pool members' attributes:
- apache2_mod_proxy:
balancer_vhost: 10.0.0.2
# Get a specific member's attributes:
- apache2_mod_proxy:
balancer_vhost: myws.mydomain.org
balancer_url_suffix: /lb/
member_host: node1.myws.mydomain.org
# Enable all balancer pool members:
- apache2_mod_proxy:
balancer_vhost: '{{ myloadbalancer_host }}'
register: result
- apache2_mod_proxy:
balancer_vhost: '{{ myloadbalancer_host }}'
member_host: '{{ item.host }}'
state: present
with_items: '{{ result.members }}'
# Gracefully disable a member from a loadbalancer node:
- apache2_mod_proxy:
balancer_vhost: '{{ vhost_host }}'
member_host: '{{ member.host }}'
state: drained
delegate_to: myloadbalancernode
- wait_for:
host: '{{ member.host }}'
port: '{{ member.port }}'
state: drained
delegate_to: myloadbalancernode
- apache2_mod_proxy:
balancer_vhost: '{{ vhost_host }}'
member_host: '{{ member.host }}'
state: absent
delegate_to: myloadbalancernode
'''
RETURN = '''
member:
description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
type: dict
returned: success
sample:
{"attributes":
{"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
}
members:
description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
returned: success
type: list
sample:
[{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
},
{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.21",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false}
}
]
'''
import re
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
HAS_BEAUTIFULSOUP = False
else:
HAS_BEAUTIFULSOUP = True
# balancer member attributes extraction regexp:
EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
# Apache2 server version extraction regexp:
APACHE_VERSION_EXPRESSION = r"Server Version: Apache/([\d.]+) \(([\w]+)\)"
def regexp_extraction(string, _regexp, groups=1):
""" Returns the capture group (default=1) specified in the regexp, applied to the string """
regexp_search = re.search(string=str(string), pattern=str(_regexp))
if regexp_search:
if regexp_search.group(groups) != '':
return str(regexp_search.group(groups))
return None
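# Illustrative example (made-up URL): applied to a typical balancer-manager
# member management URL, the EXPRESSION pattern above captures the scheme in
# group 3, the host in group 4 and the port in group 5:
#   url = 'b=mycluster&w=http://10.0.0.20:8080/ws&nonce=abc'
#   regexp_extraction(url, EXPRESSION, 4)  # -> '10.0.0.20'
#   regexp_extraction(url, EXPRESSION, 5)  # -> '8080'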
class BalancerMember(object):
""" Apache 2.4 mod_proxy LB balancer member.
attributes:
read-only:
host -> member host (string),
management_url -> member management url (string),
protocol -> member protocol (string)
port -> member port (string),
path -> member location (string),
balancer_url -> url of this member's parent balancer (string),
attributes -> whole member attributes (dictionary)
module -> ansible module instance (AnsibleModule object).
writable:
status -> status of the member (dictionary)
"""
def __init__(self, management_url, balancer_url, module):
self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
self.management_url = str(management_url)
self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
self.port = regexp_extraction(management_url, EXPRESSION, 5)
self.path = regexp_extraction(management_url, EXPRESSION, 6)
self.balancer_url = str(balancer_url)
self.module = module
def get_member_attributes(self):
""" Returns a dictionary of a balancer member's attributes."""
balancer_member_page = fetch_url(self.module, self.management_url)
try:
assert balancer_member_page[1]['status'] == 200
except AssertionError:
self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1])
else:
try:
soup = BeautifulSoup(balancer_member_page[0])
except TypeError:
self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(soup))
else:
subsoup = soup.findAll('table')[1].findAll('tr')
keys = subsoup[0].findAll('th')
for valuesset in subsoup[1::1]:
if re.search(pattern=self.host, string=str(valuesset)):
values = valuesset.findAll('td')
return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
def get_member_status(self):
""" Returns a dictionary of a balancer member's status attributes."""
status_mapping = {'disabled':'Dis',
'drained':'Drn',
'hot_standby':'Stby',
'ignore_errors':'Ign'}
status = {}
actual_status = str(self.attributes['Status'])
for mode in status_mapping.keys():
if re.search(pattern=status_mapping[mode], string=actual_status):
status[mode] = True
else:
status[mode] = False
return status
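# Illustrative example: a member whose raw 'Status' attribute is 'Init Dis '
# yields {'disabled': True, 'drained': False, 'hot_standby': False,
# 'ignore_errors': False}, since only the 'Dis' token appears in the string.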
def set_member_status(self, values):
""" Sets a balancer member's status attributes amongst pre-mapped values."""
values_mapping = {'disabled':'&w_status_D',
'drained':'&w_status_N',
'hot_standby':'&w_status_H',
'ignore_errors':'&w_status_I'}
request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
for k in values_mapping.keys():
if values[str(k)]:
request_body = request_body + str(values_mapping[k]) + '=1'
else:
request_body = request_body + str(values_mapping[k]) + '=0'
response = fetch_url(self.module, self.management_url, data=str(request_body))
try:
assert response[1]['status'] == 200
except AssertionError:
self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status'])
attributes = property(get_member_attributes)
status = property(get_member_status, set_member_status)
class Balancer(object):
""" Apache httpd 2.4 mod_proxy balancer object"""
def __init__(self, host, suffix, module, members=None, tls=False):
if tls:
self.base_url = str(str('https://') + str(host))
self.url = str(str('https://') + str(host) + str(suffix))
else:
self.base_url = str(str('http://') + str(host))
self.url = str(str('http://') + str(host) + str(suffix))
self.module = module
self.page = self.fetch_balancer_page()
if members is None:
self._members = []
def fetch_balancer_page(self):
""" Returns the balancer management html page as a string for later parsing."""
page = fetch_url(self.module, str(self.url))
try:
assert page[1]['status'] == 200
except AssertionError:
self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
else:
content = page[0].read()
apache_version = regexp_extraction(content, APACHE_VERSION_EXPRESSION, 1)
if apache_version is None or not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
return content
def get_balancer_members(self):
""" Returns members of the balancer as a generator object for later iteration."""
try:
soup = BeautifulSoup(self.page)
except TypeError:
self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
else:
for element in soup.findAll('a')[1::1]:
balancer_member_suffix = str(element.get('href'))
try:
assert balancer_member_suffix != ''
except AssertionError:
self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
else:
yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
members = property(get_balancer_members)
def main():
""" Initiates module."""
module = AnsibleModule(
argument_spec=dict(
balancer_vhost=dict(required=True, type='str'),
balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
member_host=dict(type='str'),
state=dict(type='str'),
tls=dict(default=False, type='bool'),
validate_certs=dict(default=True, type='bool')
),
supports_check_mode=True
)
if HAS_BEAUTIFULSOUP is False:
module.fail_json(msg="python module 'BeautifulSoup' is required!")
if module.params['state'] is not None:
states = module.params['state'].split(',')
if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
else:
for _state in states:
if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
module.fail_json(msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'.")
else:
states = ['None']
mybalancer = Balancer(module.params['balancer_vhost'],
module.params['balancer_url_suffix'],
module=module,
tls=module.params['tls'])
if module.params['member_host'] is None:
json_output_list = []
for member in mybalancer.members:
json_output_list.append({
"host": member.host,
"status": member.status,
"protocol": member.protocol,
"port": member.port,
"path": member.path,
"attributes": member.attributes,
"management_url": member.management_url,
"balancer_url": member.balancer_url
})
module.exit_json(
changed=False,
members=json_output_list
)
else:
changed = False
member_exists = False
member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors':False}
for mode in member_status.keys():
for state in states:
if mode == state:
member_status[mode] = True
elif mode == 'disabled' and state == 'absent':
member_status[mode] = True
for member in mybalancer.members:
if str(member.host) == str(module.params['member_host']):
member_exists = True
if module.params['state'] is not None:
member_status_before = member.status
if not module.check_mode:
member_status_after = member.status = member_status
else:
member_status_after = member_status
if member_status_before != member_status_after:
changed = True
json_output = {
"host": member.host,
"status": member.status,
"protocol": member.protocol,
"port": member.port,
"path": member.path,
"attributes": member.attributes,
"management_url": member.management_url,
"balancer_url": member.balancer_url
}
if member_exists:
module.exit_json(
changed=changed,
member=json_output
)
else:
module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
if __name__ == '__main__':
main()
|
musically-ut/numpy
|
refs/heads/master
|
numpy/f2py/capi_maps.py
|
17
|
#!/usr/bin/env python
"""
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 10:57:33 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.60 $"[10:-1]
from . import __version__
f2py_version = __version__.version
import copy
import re
import os
import sys
from .auxfuncs import *
from .crackfortran import markoutercomma
from . import cb_rules
# Numarray and Numeric users should set this to False
using_newcore = True
depargs=[]
lcb_map={}
lcb2_map={}
# forced casting: mainly caused by the fact that Python or Numeric
# C/APIs do not support the corresponding C types.
c2py_map={'double': 'float',
'float': 'float', # forced casting
'long_double': 'float', # forced casting
'char': 'int', # forced casting
'signed_char': 'int', # forced casting
'unsigned_char': 'int', # forced casting
'short': 'int', # forced casting
'unsigned_short': 'int', # forced casting
'int': 'int', # (forced casting)
'long': 'int',
'long_long': 'long',
'unsigned': 'int', # forced casting
'complex_float': 'complex', # forced casting
'complex_double': 'complex',
'complex_long_double': 'complex', # forced casting
'string': 'string',
}
c2capi_map={'double':'NPY_DOUBLE',
'float':'NPY_FLOAT',
'long_double':'NPY_DOUBLE', # forced casting
'char':'NPY_CHAR',
'unsigned_char':'NPY_UBYTE',
'signed_char':'NPY_BYTE',
'short':'NPY_SHORT',
'unsigned_short':'NPY_USHORT',
'int':'NPY_INT',
'unsigned':'NPY_UINT',
'long':'NPY_LONG',
'long_long':'NPY_LONG', # forced casting
'complex_float':'NPY_CFLOAT',
'complex_double':'NPY_CDOUBLE',
'complex_long_double':'NPY_CDOUBLE', # forced casting
'string':'NPY_CHAR'}
# These new maps aren't used anywhere yet, but should be used by default
# unless building numeric or numarray extensions.
if using_newcore:
c2capi_map={'double': 'NPY_DOUBLE',
'float': 'NPY_FLOAT',
'long_double': 'NPY_LONGDOUBLE',
'char': 'NPY_BYTE',
'unsigned_char': 'NPY_UBYTE',
'signed_char': 'NPY_BYTE',
'short': 'NPY_SHORT',
'unsigned_short': 'NPY_USHORT',
'int': 'NPY_INT',
'unsigned': 'NPY_UINT',
'long': 'NPY_LONG',
'unsigned_long': 'NPY_ULONG',
'long_long': 'NPY_LONGLONG',
'unsigned_long_long': 'NPY_ULONGLONG',
'complex_float': 'NPY_CFLOAT',
'complex_double': 'NPY_CDOUBLE',
'complex_long_double': 'NPY_CDOUBLE',
'string': 'NPY_CHAR', # f2py 2e is not ready for NPY_STRING (must set itemsize etc)
#'string':'NPY_STRING'
}
c2pycode_map={'double':'d',
'float':'f',
'long_double':'d', # forced casting
'char':'1',
'signed_char':'1',
'unsigned_char':'b',
'short':'s',
'unsigned_short':'w',
'int':'i',
'unsigned':'u',
'long':'l',
'long_long':'L',
'complex_float':'F',
'complex_double':'D',
'complex_long_double':'D', # forced casting
'string':'c'
}
if using_newcore:
c2pycode_map={'double':'d',
'float':'f',
'long_double':'g',
'char':'b',
'unsigned_char':'B',
'signed_char':'b',
'short':'h',
'unsigned_short':'H',
'int':'i',
'unsigned':'I',
'long':'l',
'unsigned_long':'L',
'long_long':'q',
'unsigned_long_long':'Q',
'complex_float':'F',
'complex_double':'D',
'complex_long_double':'G',
'string':'S'}
c2buildvalue_map={'double':'d',
'float':'f',
'char':'b',
'signed_char':'b',
'short':'h',
'int':'i',
'long':'l',
'long_long':'L',
'complex_float':'N',
'complex_double':'N',
'complex_long_double':'N',
'string':'z'}
if sys.version_info[0] >= 3:
# Bytes, not Unicode strings
c2buildvalue_map['string'] = 'y'
if using_newcore:
#c2buildvalue_map=???
pass
f2cmap_all={'real':{'':'float','4':'float','8':'double','12':'long_double','16':'long_double'},
'integer':{'':'int','1':'signed_char','2':'short','4':'int','8':'long_long',
'-1':'unsigned_char','-2':'unsigned_short','-4':'unsigned',
'-8':'unsigned_long_long'},
'complex':{'':'complex_float','8':'complex_float',
'16':'complex_double','24':'complex_long_double',
'32':'complex_long_double'},
'complexkind':{'':'complex_float','4':'complex_float',
'8':'complex_double','12':'complex_long_double',
'16':'complex_long_double'},
'logical':{'':'int','1':'char','2':'short','4':'int','8':'long_long'},
'double complex':{'':'complex_double'},
'double precision':{'':'double'},
'byte':{'':'char'},
'character':{'':'string'}
}
if os.path.isfile('.f2py_f2cmap'):
# User defined additions to f2cmap_all.
# .f2py_f2cmap must contain a dictionary of dictionaries, only.
# For example, {'real':{'low':'float'}} means that Fortran 'real(low)' is
# interpreted as C 'float'.
# This feature is useful for F90/95 users if they use PARAMETERs
# in type specifications.
try:
outmess('Reading .f2py_f2cmap ...\n')
with open('.f2py_f2cmap', 'r') as f:
    d = eval(f.read(), {}, {})
for k, d1 in list(d.items()):
for k1 in list(d1.keys()):
d1[k1.lower()] = d1[k1]
d[k.lower()] = d[k]
for k in list(d.keys()):
if k not in f2cmap_all:
f2cmap_all[k]={}
for k1 in list(d[k].keys()):
if d[k][k1] in c2py_map:
if k1 in f2cmap_all[k]:
outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k, k1, f2cmap_all[k][k1], d[k][k1]))
f2cmap_all[k][k1] = d[k][k1]
outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, d[k][k1]))
else:
errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k, k1, d[k][k1], d[k][k1], list(c2py_map.keys())))
outmess('Successfully applied user defined changes from .f2py_f2cmap\n')
except Exception as msg:
errmess('Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg))
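# Illustrative .f2py_f2cmap example: given a Fortran kind PARAMETER such as
#   integer, parameter :: sp = kind(1.0)
#   real(sp) :: x
# a .f2py_f2cmap file containing {'real': {'sp': 'float'}} makes getctype
# (below) resolve real(sp) to the C type 'float' via the merged f2cmap_all.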
cformat_map={'double': '%g',
'float': '%g',
'long_double': '%Lg',
'char': '%d',
'signed_char': '%d',
'unsigned_char': '%hhu',
'short': '%hd',
'unsigned_short': '%hu',
'int': '%d',
'unsigned': '%u',
'long': '%ld',
'unsigned_long': '%lu',
'long_long': '%ld',
'complex_float': '(%g,%g)',
'complex_double': '(%g,%g)',
'complex_long_double': '(%Lg,%Lg)',
'string': '%s',
}
############### Auxiliary functions
def getctype(var):
"""
Determines C type
"""
ctype='void'
if isfunction(var):
if 'result' in var:
a=var['result']
else:
a=var['name']
if a in var['vars']:
return getctype(var['vars'][a])
else:
errmess('getctype: function %s has no return value?!\n'%a)
elif issubroutine(var):
return ctype
elif 'typespec' in var and var['typespec'].lower() in f2cmap_all:
typespec = var['typespec'].lower()
f2cmap=f2cmap_all[typespec]
ctype=f2cmap[''] # default type
if 'kindselector' in var:
if '*' in var['kindselector']:
try:
ctype=f2cmap[var['kindselector']['*']]
except KeyError:
errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'], '*', var['kindselector']['*']))
elif 'kind' in var['kindselector']:
if typespec+'kind' in f2cmap_all:
f2cmap=f2cmap_all[typespec+'kind']
try:
ctype=f2cmap[var['kindselector']['kind']]
except KeyError:
if typespec in f2cmap_all:
f2cmap=f2cmap_all[typespec]
try:
ctype=f2cmap[str(var['kindselector']['kind'])]
except KeyError:
errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n'\
%(typespec, var['kindselector']['kind'], ctype,
typespec, var['kindselector']['kind'], os.getcwd()))
else:
if not isexternal(var):
errmess('getctype: No C-type found in "%s", assuming void.\n'%var)
return ctype
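# Illustrative example: for a crackfortran variable dictionary such as
#   {'typespec': 'real', 'kindselector': {'*': '8'}}
# getctype returns 'double' via f2cmap_all['real']['8'].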
def getstrlength(var):
if isstringfunction(var):
if 'result' in var:
a=var['result']
else:
a=var['name']
if a in var['vars']:
return getstrlength(var['vars'][a])
else:
errmess('getstrlength: function %s has no return value?!\n'%a)
if not isstring(var):
errmess('getstrlength: expected a signature of a string but got: %s\n'%(repr(var)))
len='1'
if 'charselector' in var:
a=var['charselector']
if '*' in a:
len=a['*']
elif 'len' in a:
len=a['len']
if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len):
#if len in ['(*)','*','(:)',':']:
if isintent_hide(var):
errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(repr(var)))
len='-1'
return len
def getarrdims(a,var,verbose=0):
global depargs
ret={}
if isstring(var) and not isarray(var):
ret['dims']=getstrlength(var)
ret['size']=ret['dims']
ret['rank']='1'
elif isscalar(var):
ret['size']='1'
ret['rank']='0'
ret['dims']=''
elif isarray(var):
# if not isintent_c(var):
# var['dimension'].reverse()
dim=copy.copy(var['dimension'])
ret['size']='*'.join(dim)
try: ret['size']=repr(eval(ret['size']))
except Exception: pass  # keep non-numeric size expressions symbolic
ret['dims']=','.join(dim)
ret['rank']=repr(len(dim))
ret['rank*[-1]']=repr(len(dim)*[-1])[1:-1]
for i in range(len(dim)): # solve dim for dependencies
v=[]
if dim[i] in depargs: v=[dim[i]]
else:
for va in depargs:
if re.match(r'.*?\b%s\b.*'%va, dim[i]):
v.append(va)
for va in v:
if depargs.index(va)>depargs.index(a):
dim[i]='*'
break
ret['setdims'], i='', -1
for d in dim:
i=i+1
if d not in ['*', ':', '(*)', '(:)']:
ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'], i, d)
if ret['setdims']: ret['setdims']=ret['setdims'][:-1]
ret['cbsetdims'], i='', -1
for d in var['dimension']:
i=i+1
if d not in ['*', ':', '(*)', '(:)']:
ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, d)
elif isintent_in(var):
outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' \
% (d))
ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, 0)
elif verbose :
errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(repr(a), repr(d)))
if ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1]
# if not isintent_c(var):
# var['dimension'].reverse()
return ret
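# Illustrative example: for an array variable with dimension ['3', '4'],
# getarrdims returns size='12' (the evaluated product), dims='3,4', rank='2'
# and rank*[-1]='-1, -1'; assumed-shape entries ('*' or ':') are skipped in
# setdims and substituted with 0 (plus a warning) in cbsetdims.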
def getpydocsign(a, var):
global lcb_map
if isfunction(var):
if 'result' in var:
af=var['result']
else:
af=var['name']
if af in var['vars']:
return getpydocsign(af, var['vars'][af])
else:
errmess('getctype: function %s has no return value?!\n'%af)
return '', ''
sig, sigout=a, a
opt=''
if isintent_in(var): opt='input'
elif isintent_inout(var): opt='in/output'
out_a = a
if isintent_out(var):
for k in var['intent']:
if k[:4]=='out=':
out_a = k[4:]
break
init=''
ctype=getctype(var)
if hasinitvalue(var):
init, showinit=getinit(a, var)
init = ', optional\\n Default: %s' % showinit
if isscalar(var):
if isintent_inout(var):
sig='%s : %s rank-0 array(%s,\'%s\')%s'%(a, opt, c2py_map[ctype],
c2pycode_map[ctype], init)
else:
sig='%s : %s %s%s'%(a, opt, c2py_map[ctype], init)
sigout='%s : %s'%(out_a, c2py_map[ctype])
elif isstring(var):
if isintent_inout(var):
sig='%s : %s rank-0 array(string(len=%s),\'c\')%s'%(a, opt, getstrlength(var), init)
else:
sig='%s : %s string(len=%s)%s'%(a, opt, getstrlength(var), init)
sigout='%s : string(len=%s)'%(out_a, getstrlength(var))
elif isarray(var):
dim=var['dimension']
rank=repr(len(dim))
sig='%s : %s rank-%s array(\'%s\') with bounds (%s)%s'%(a, opt, rank,
c2pycode_map[ctype],
','.join(dim), init)
if a==out_a:
sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\
%(a, rank, c2pycode_map[ctype], ','.join(dim))
else:
sigout='%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\
%(out_a, rank, c2pycode_map[ctype], ','.join(dim), a)
elif isexternal(var):
ua=''
if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]:
ua=lcb2_map[lcb_map[a]]['argname']
if not ua==a: ua=' => %s'%ua
else: ua=''
sig='%s : call-back function%s'%(a, ua)
sigout=sig
else:
errmess('getpydocsign: Could not resolve docsignature for "%s".\\n'%a)
return sig, sigout
def getarrdocsign(a, var):
ctype=getctype(var)
if isstring(var) and (not isarray(var)):
sig='%s : rank-0 array(string(len=%s),\'c\')'%(a, getstrlength(var))
elif isscalar(var):
sig='%s : rank-0 array(%s,\'%s\')'%(a, c2py_map[ctype],
c2pycode_map[ctype],)
elif isarray(var):
dim=var['dimension']
rank=repr(len(dim))
sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a, rank,
c2pycode_map[ctype],
','.join(dim))
return sig
def getinit(a, var):
if isstring(var): init, showinit='""', "''"
else: init, showinit='', ''
if hasinitvalue(var):
init=var['=']
showinit=init
if iscomplex(var) or iscomplexarray(var):
ret={}
try:
v = var["="]
if ',' in v:
ret['init.r'], ret['init.i']=markoutercomma(v[1:-1]).split('@,@')
else:
v = eval(v, {}, {})
ret['init.r'], ret['init.i']=str(v.real), str(v.imag)
except Exception:
raise ValueError('getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a))
if isarray(var):
init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'], ret['init.i'])
elif isstring(var):
if not init: init, showinit='""', "''"
if init[0]=="'":
init='"%s"'%(init[1:-1].replace('"', '\\"'))
if init[0]=='"': showinit="'%s'"%(init[1:-1])
return init, showinit
def sign2map(a, var):
"""
varname,ctype,atype
init,init.r,init.i,pytype
vardebuginfo,vardebugshowvalue,varshowvalue
varrformat
intent
"""
global lcb_map, cb_map
out_a = a
if isintent_out(var):
for k in var['intent']:
if k[:4]=='out=':
out_a = k[4:]
break
ret={'varname':a,'outvarname':out_a}
ret['ctype']=getctype(var)
intent_flags = []
for f, s in isintent_dict.items():
if f(var): intent_flags.append('F2PY_%s'%s)
if intent_flags:
#XXX: Evaluate intent_flags here.
ret['intent'] = '|'.join(intent_flags)
else:
ret['intent'] = 'F2PY_INTENT_IN'
if isarray(var): ret['varrformat']='N'
elif ret['ctype'] in c2buildvalue_map:
ret['varrformat']=c2buildvalue_map[ret['ctype']]
else: ret['varrformat']='O'
ret['init'], ret['showinit']=getinit(a, var)
if hasinitvalue(var) and iscomplex(var) and not isarray(var):
ret['init.r'], ret['init.i'] = markoutercomma(ret['init'][1:-1]).split('@,@')
if isexternal(var):
ret['cbnamekey']=a
if a in lcb_map:
ret['cbname']=lcb_map[a]
ret['maxnofargs']=lcb2_map[lcb_map[a]]['maxnofargs']
ret['nofoptargs']=lcb2_map[lcb_map[a]]['nofoptargs']
ret['cbdocstr']=lcb2_map[lcb_map[a]]['docstr']
ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr']
else:
ret['cbname']=a
errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a, list(lcb_map.keys())))
if isstring(var):
ret['length']=getstrlength(var)
if isarray(var):
ret=dictappend(ret, getarrdims(a, var))
dim=copy.copy(var['dimension'])
if ret['ctype'] in c2capi_map:
ret['atype']=c2capi_map[ret['ctype']]
# Debug info
if debugcapi(var):
il=[isintent_in, 'input', isintent_out, 'output',
isintent_inout, 'inoutput', isrequired, 'required',
isoptional, 'optional', isintent_hide, 'hidden',
iscomplex, 'complex scalar',
l_and(isscalar, l_not(iscomplex)), 'scalar',
isstring, 'string', isarray, 'array',
iscomplexarray, 'complex array', isstringarray, 'string array',
iscomplexfunction, 'complex function',
l_and(isfunction, l_not(iscomplexfunction)), 'function',
isexternal, 'callback',
isintent_callback, 'callback',
isintent_aux, 'auxiliary',
#ismutable,'mutable',l_not(ismutable),'immutable',
]
rl=[]
for i in range(0, len(il), 2):
if il[i](var): rl.append(il[i+1])
if isstring(var):
rl.append('slen(%s)=%s'%(a, ret['length']))
if isarray(var):
# if not isintent_c(var):
# var['dimension'].reverse()
ddim=','.join(map(lambda x, y:'%s|%s'%(x, y), var['dimension'], dim))
rl.append('dims(%s)'%ddim)
# if not isintent_c(var):
# var['dimension'].reverse()
if isexternal(var):
ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a, ret['cbname'], ','.join(rl))
else:
ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'], a, ret['showinit'], ','.join(rl))
if isscalar(var):
if ret['ctype'] in cformat_map:
ret['vardebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']])
if isstring(var):
ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a)
if isexternal(var):
ret['vardebugshowvalue']='debug-capi:%s=%%p'%(a)
if ret['ctype'] in cformat_map:
ret['varshowvalue']='#name#:%s=%s'%(a, cformat_map[ret['ctype']])
ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
if isstring(var):
ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a, a)
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var)
if hasnote(var):
ret['note']=var['note']
return ret
def routsign2map(rout):
"""
name,NAME,begintitle,endtitle
rname,ctype,rformat
routdebugshowvalue
"""
global lcb_map
name = rout['name']
fname = getfortranname(rout)
ret={'name': name,
'texname': name.replace('_', '\\_'),
'name_lower': name.lower(),
'NAME': name.upper(),
'begintitle': gentitle(name),
'endtitle': gentitle('end of %s'%name),
'fortranname': fname,
'FORTRANNAME': fname.upper(),
'callstatement': getcallstatement(rout) or '',
'usercode': getusercode(rout) or '',
'usercode1': getusercode1(rout) or '',
}
if '_' in fname:
ret['F_FUNC'] = 'F_FUNC_US'
else:
ret['F_FUNC'] = 'F_FUNC'
if '_' in name:
ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US'
else:
ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC'
lcb_map={}
if 'use' in rout:
for u in rout['use'].keys():
if u in cb_rules.cb_map:
for un in cb_rules.cb_map[u]:
ln=un[0]
if 'map' in rout['use'][u]:
for k in rout['use'][u]['map'].keys():
if rout['use'][u]['map'][k]==un[0]: ln=k;break
lcb_map[ln]=un[1]
#else:
# errmess('routsign2map: cb_map does not contain module "%s" used in "use" statement.\n'%(u))
elif 'externals' in rout and rout['externals']:
errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'], repr(rout['externals'])))
ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or ''
if isfunction(rout):
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
ret['rname']=a
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout)
ret['ctype']=getctype(rout['vars'][a])
if hasresultnote(rout):
ret['resultnote']=rout['vars'][a]['note']
rout['vars'][a]['note']=['See elsewhere.']
if ret['ctype'] in c2buildvalue_map:
ret['rformat']=c2buildvalue_map[ret['ctype']]
else:
ret['rformat']='O'
errmess('routsign2map: no c2buildvalue key for type %s\n'%(repr(ret['ctype'])))
if debugcapi(rout):
if ret['ctype'] in cformat_map:
ret['routdebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']])
if isstringfunction(rout):
ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a)
if isstringfunction(rout):
ret['rlength']=getstrlength(rout['vars'][a])
if ret['rlength']=='-1':
errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n'%(repr(rout['name'])))
ret['rlength']='10'
if hasnote(rout):
ret['note']=rout['note']
rout['note']=['See elsewhere.']
return ret
def modsign2map(m):
"""
modulename
"""
if ismodule(m):
ret={'f90modulename':m['name'],
'F90MODULENAME':m['name'].upper(),
'texf90modulename':m['name'].replace('_', '\\_')}
else:
ret={'modulename':m['name'],
'MODULENAME':m['name'].upper(),
'texmodulename':m['name'].replace('_', '\\_')}
ret['restdoc'] = getrestdoc(m) or []
if hasnote(m):
ret['note']=m['note']
#m['note']=['See elsewhere.']
ret['usercode'] = getusercode(m) or ''
ret['usercode1'] = getusercode1(m) or ''
if m['body']:
ret['interface_usercode'] = getusercode(m['body'][0]) or ''
else:
ret['interface_usercode'] = ''
ret['pymethoddef'] = getpymethoddef(m) or ''
if 'coutput' in m:
ret['coutput'] = m['coutput']
if 'f2py_wrapper_output' in m:
ret['f2py_wrapper_output'] = m['f2py_wrapper_output']
return ret
def cb_sign2map(a,var,index=None):
ret={'varname':a}
if index is None or 1: # always true: the gh-7712 varname_i patch is disabled
ret['varname_i'] = ret['varname']
else:
ret['varname_i'] = ret['varname'] + '_' + str(index)
ret['ctype']=getctype(var)
if ret['ctype'] in c2capi_map:
ret['atype']=c2capi_map[ret['ctype']]
if ret['ctype'] in cformat_map:
ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
if isarray(var):
ret=dictappend(ret, getarrdims(a, var))
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var)
if hasnote(var):
ret['note']=var['note']
var['note']=['See elsewhere.']
return ret
def cb_routsign2map(rout, um):
"""
name,begintitle,endtitle,argname
ctype,rctype,maxnofargs,nofoptargs,returncptr
"""
ret={'name':'cb_%s_in_%s'%(rout['name'], um),
'returncptr':''}
if isintent_callback(rout):
if '_' in rout['name']:
F_FUNC='F_FUNC_US'
else:
F_FUNC='F_FUNC'
ret['callbackname'] = '%s(%s,%s)' \
% (F_FUNC,
rout['name'].lower(),
rout['name'].upper(),
)
ret['static'] = 'extern'
else:
ret['callbackname'] = ret['name']
ret['static'] = 'static'
ret['argname']=rout['name']
ret['begintitle']=gentitle(ret['name'])
ret['endtitle']=gentitle('end of %s'%ret['name'])
ret['ctype']=getctype(rout)
ret['rctype']='void'
if ret['ctype']=='string': ret['rctype']='void'
else:
ret['rctype']=ret['ctype']
if ret['rctype']!='void':
if iscomplexfunction(rout):
ret['returncptr'] = """
#ifdef F2PY_CB_RETURNCOMPLEX
return_value=
#endif
"""
else:
ret['returncptr'] = 'return_value='
if ret['ctype'] in cformat_map:
ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
if isstringfunction(rout):
ret['strlength']=getstrlength(rout)
if isfunction(rout):
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if hasnote(rout['vars'][a]):
ret['note']=rout['vars'][a]['note']
rout['vars'][a]['note']=['See elsewhere.']
ret['rname']=a
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout)
if iscomplexfunction(rout):
ret['rctype']="""
#ifdef F2PY_CB_RETURNCOMPLEX
#ctype#
#else
void
#endif
"""
else:
if hasnote(rout):
ret['note']=rout['note']
rout['note']=['See elsewhere.']
nofargs=0
nofoptargs=0
if 'args' in rout and 'vars' in rout:
for a in rout['args']:
var=rout['vars'][a]
if l_or(isintent_in, isintent_inout)(var):
nofargs=nofargs+1
if isoptional(var):
nofoptargs=nofoptargs+1
ret['maxnofargs']=repr(nofargs)
ret['nofoptargs']=repr(nofoptargs)
if hasnote(rout) and isfunction(rout) and 'result' in rout:
ret['routnote']=rout['note']
rout['note']=['See elsewhere.']
return ret
def common_sign2map(a, var): # obsolete
ret={'varname':a}
ret['ctype']=getctype(var)
if isstringarray(var):
ret['ctype']='char'
if ret['ctype'] in c2capi_map:
ret['atype']=c2capi_map[ret['ctype']]
if ret['ctype'] in cformat_map:
ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
if isarray(var):
ret=dictappend(ret, getarrdims(a, var))
elif isstring(var):
ret['size']=getstrlength(var)
ret['rank']='1'
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var)
if hasnote(var):
ret['note']=var['note']
var['note']=['See elsewhere.']
ret['arrdocstr']=getarrdocsign(a, var) # for strings this returns 0-rank but actually is 1-rank
return ret
|
TakayukiSakai/tensorflow
|
refs/heads/master
|
tensorflow/contrib/tensor_forest/python/kernel_tests/best_splits_op_test.py
|
3
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.best_splits_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow # pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class BestSplitsClassificationTests(test_util.TensorFlowTestCase):
def setUp(self):
self.finished = [3, 5]
self.node_map = [-1, -1, -1, 0, -1, 3, -1, -1, -1]
self.candidate_counts = [[[50., 60., 40., 3.], [70., 30., 70., 30.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.]],
[[10., 10., 10., 10.], [10., 5., 5., 10.]]]
self.total_counts = [[100., 100., 100., 100.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[100., 100., 100., 100.]]
self.squares = []
self.ops = training_ops.Load()
def testSimple(self):
with self.test_session():
split_indices = self.ops.best_splits(
self.finished, self.node_map, self.candidate_counts, self.squares,
self.total_counts, self.squares, regression=False)
self.assertAllEqual([0, 1], split_indices.eval())
def testNoFinished(self):
with self.test_session():
split_indices = self.ops.best_splits(
[], self.node_map, self.candidate_counts, self.squares,
self.total_counts, self.squares, regression=False)
self.assertAllEqual([], split_indices.eval())
def testBadInput(self):
del self.total_counts[1]
with self.test_session():
with self.assertRaisesOpError(
'Number of accumulators should be the same in split_sums '
'and accumulator_sums.'):
self.ops.best_splits(
self.finished, self.node_map, self.candidate_counts, self.squares,
self.total_counts, self.squares, regression=False).eval()
class BestSplitsRegressionTests(test_util.TensorFlowTestCase):
def setUp(self):
self.finished = [3, 5]
self.node_map = [-1, -1, -1, 0, -1, 3, -1, -1, -1]
self.candidate_sums = [[[5., 8., 8., 8.], [5., 10., 10., 10.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.]],
[[10., 10., 20., 10.], [10., 5., 5., 5.]]]
self.candidate_squares = [[[5., 50., 50., 50.], [5., 50., 50., 50.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.]],
[[0., 0., 0., 0.], [0., 0., 0., 0.]],
[[10., 40., 50., 60.], [10., 40., 40., 40.]]]
self.total_sums = [[15., 10., 10., 10.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[20., 20., 20., 20.]]
self.total_squares = [[15., 50., 50., 50.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[20., 60., 60., 60.]]
self.ops = training_ops.Load()
def testSimple(self):
with self.test_session():
split_indices = self.ops.best_splits(
self.finished, self.node_map, self.candidate_sums,
self.candidate_squares, self.total_sums, self.total_squares,
regression=True)
self.assertAllEqual([1, 0], split_indices.eval())
if __name__ == '__main__':
googletest.main()
|
vmindru/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py
|
33
|
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_containerinstance
version_added: "2.5"
short_description: Manage an Azure Container Instance.
description:
- Create, update and delete an Azure Container Instance.
options:
resource_group:
description:
- Name of resource group.
required: true
name:
description:
- The name of the container group.
required: true
os_type:
description:
- The OS type of containers.
choices:
- linux
- windows
default: linux
state:
description:
- Assert the state of the container instance. Use 'present' to create or update a container instance and 'absent' to delete it.
default: present
choices:
- absent
- present
ip_address:
description:
- The IP address type of the container group (default is 'none').
choices:
- public
- none
default: 'none'
ports:
description:
- List of ports exposed within the container group.
location:
description:
- Valid azure location. Defaults to location of the resource group.
registry_login_server:
description:
- The container image registry login server.
registry_username:
description:
- The username used to log in to the container image registry server.
registry_password:
description:
- The password used to log in to the container image registry server.
containers:
description:
- List of containers.
suboptions:
name:
description:
- The name of the container instance.
required: true
image:
description:
- The container image name.
required: true
memory:
description:
- The required memory of the containers in GB.
default: 1.5
cpu:
description:
- The required number of CPU cores of the containers.
default: 1
ports:
description:
- List of ports exposed within the container group.
force_update:
description:
- Force update of existing container instance. Any update will result in deletion and recreation of existing containers.
type: bool
default: 'no'
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create sample container group
azure_rm_containerinstance:
resource_group: testrg
name: mynewcontainergroup
os_type: linux
ip_address: public
ports:
- 80
- 81
containers:
- name: mycontainer1
image: httpd
memory: 1.5
ports:
- 80
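
# A second, illustrative example using the same hypothetical names as above:
# removing the container group again with state: absent.
- name: Remove sample container group
  azure_rm_containerinstance:
    resource_group: testrg
    name: mynewcontainergroup
    state: absent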
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestGroup/providers/Microsoft.ContainerInstance/containerGroups/aci1b6dd89
provisioning_state:
description:
- Provisioning state of the container.
returned: always
type: str
sample: Creating
ip_address:
description:
- Public IP Address of created container group.
returned: if address is public
type: str
sample: 175.12.233.11
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
except ImportError:
# This is handled in azure_rm_common
pass
def create_container_dict_from_obj(container):
'''
Create a dict from an instance of a Container.
    :param container: Container instance
:return: dict
'''
results = dict(
name=container.name,
image=container.image,
memory=container.resources.requests.memory_in_gb,
cpu=container.resources.requests.cpu
# command (list of str)
# ports (list of ContainerPort)
# environment_variables (list of EnvironmentVariable)
# resources (ResourceRequirements)
# volume mounts (list of VolumeMount)
)
if container.instance_view is not None:
# instance_view (ContainerPropertiesInstanceView)
results["instance_restart_count"] = container.instance_view.restart_count
if container.instance_view.current_state:
results["instance_current_state"] = container.instance_view.current_state.state
results["instance_current_start_time"] = container.instance_view.current_state.start_time
results["instance_current_exit_code"] = container.instance_view.current_state.exit_code
results["instance_current_finish_time"] = container.instance_view.current_state.finish_time
results["instance_current_detail_status"] = container.instance_view.current_state.detail_status
if container.instance_view.previous_state:
results["instance_previous_state"] = container.instance_view.previous_state.state
results["instance_previous_start_time"] = container.instance_view.previous_state.start_time
results["instance_previous_exit_code"] = container.instance_view.previous_state.exit_code
results["instance_previous_finish_time"] = container.instance_view.previous_state.finish_time
results["instance_previous_detail_status"] = container.instance_view.previous_state.detail_status
# events (list of ContainerEvent)
return results
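# Illustrative note (not part of the module): for the httpd container in the
# EXAMPLES section, the dict built above would look roughly like this
# (values hypothetical):
#
#   {'name': 'mycontainer1',
#    'image': 'httpd',
#    'memory': 1.5,
#    'cpu': 1}
#
# with the instance_* keys added only when an instance view is attached.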
class AzureRMContainerInstance(AzureRMModuleBase):
"""Configuration class for an Azure RM container instance resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
os_type=dict(
type='str',
default='linux',
choices=['linux', 'windows']
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
location=dict(
type='str',
),
ip_address=dict(
type='str',
default='none',
choices=['public', 'none']
),
ports=dict(
type='list',
default=[]
),
registry_login_server=dict(
type='str',
default=None
),
registry_username=dict(
type='str',
default=None
),
registry_password=dict(
type='str',
default=None,
no_log=True
),
containers=dict(
type='list',
required=True
),
force_update=dict(
type='bool',
default=False
),
)
self.resource_group = None
self.name = None
self.location = None
self.state = None
self.ip_address = None
self.containers = None
self.tags = None
self.results = dict(changed=False, state=dict())
self.cgmodels = None
super(AzureRMContainerInstance, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
resource_group = None
response = None
results = dict()
# since this client hasn't been upgraded to expose models directly off the OperationClass, fish them out
self.cgmodels = self.containerinstance_client.container_groups.models
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
response = self.get_containerinstance()
if not response:
self.log("Container Group doesn't exist")
if self.state == 'absent':
self.log("Nothing to delete")
else:
self.force_update = True
else:
self.log("Container instance already exists")
if self.state == 'absent':
if not self.check_mode:
self.delete_containerinstance()
self.results['changed'] = True
self.log("Container instance deleted")
elif self.state == 'present':
self.log("Need to check if container group has to be deleted or may be updated")
update_tags, newtags = self.update_tags(response.get('tags', dict()))
if update_tags:
self.tags = newtags
if self.force_update:
self.log('Deleting container instance before update')
if not self.check_mode:
self.delete_containerinstance()
if self.state == 'present':
self.log("Need to Create / Update the container instance")
if self.force_update:
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_containerinstance()
self.results['id'] = response['id']
self.results['provisioning_state'] = response['provisioning_state']
self.results['ip_address'] = response['ip_address']['ip']
self.log("Creation / Update done")
return self.results
def create_update_containerinstance(self):
'''
        Creates or updates a container instance with the specified configuration.
:return: deserialized container instance state dictionary
'''
self.log("Creating / Updating the container instance {0}".format(self.name))
registry_credentials = None
if self.registry_login_server is not None:
registry_credentials = [self.cgmodels.ImageRegistryCredential(server=self.registry_login_server,
username=self.registry_username,
password=self.registry_password)]
ip_address = None
if self.ip_address == 'public':
# get list of ports
if self.ports:
ports = []
for port in self.ports:
ports.append(self.cgmodels.Port(port=port, protocol="TCP"))
ip_address = self.cgmodels.IpAddress(ports=ports, ip=self.ip_address)
containers = []
for container_def in self.containers:
name = container_def.get("name")
image = container_def.get("image")
memory = container_def.get("memory", 1.5)
cpu = container_def.get("cpu", 1)
ports = []
port_list = container_def.get("ports")
if port_list:
for port in port_list:
ports.append(self.cgmodels.ContainerPort(port=port))
containers.append(self.cgmodels.Container(name=name,
image=image,
resources=self.cgmodels.ResourceRequirements(
requests=self.cgmodels.ResourceRequests(memory_in_gb=memory, cpu=cpu)
),
ports=ports))
parameters = self.cgmodels.ContainerGroup(location=self.location,
containers=containers,
image_registry_credentials=registry_credentials,
restart_policy=None,
ip_address=ip_address,
os_type=self.os_type,
volumes=None,
tags=self.tags)
response = self.containerinstance_client.container_groups.create_or_update(resource_group_name=self.resource_group,
container_group_name=self.name,
container_group=parameters)
if isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
return response.as_dict()
def delete_containerinstance(self):
'''
Deletes the specified container group instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the container instance {0}".format(self.name))
response = self.containerinstance_client.container_groups.delete(resource_group_name=self.resource_group, container_group_name=self.name)
return True
def get_containerinstance(self):
'''
        Gets the properties of the specified container instance.
:return: deserialized container instance state dictionary
'''
self.log("Checking if the container instance {0} is present".format(self.name))
found = False
try:
response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group, container_group_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Container instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the container instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMContainerInstance()
if __name__ == '__main__':
main()
|
SerendpityZOEY/Fixr-RelevantCodeSearch
|
refs/heads/master
|
PythonScripts/test.py
|
2
|
from git import Repo
import os, json, pysolr
join = os.path.join
# repo is a Repo instance pointing to the repository checked out at ../../book.
# The first argument to Repo is the path to the repository you want to
# work with.
repo = Repo("../../book")
assert not repo.bare
repo.config_reader()
cw = repo.config_writer()
cw.release()
print repo.commit("master")
fifty_first_commits = list(repo.iter_commits('master', max_count=50))
print fifty_first_commits
response = []
for commit in fifty_first_commits:
    # print commit.message
    response.append({'commit': commit.message,
                     'committerName': commit.committer.name,
                     'authorName': commit.author.name})
print json.dumps(response)
with open('items.json', 'w+') as outfile:
json.dump(response, outfile)
solr = pysolr.Solr('http://localhost:8983/solr/gettingstarted_shard1_replica1', timeout=10)
items = json.load(open('items.json'))
for item in items:
item['id'] = item['commit']
solr.add(items)
#headcommit = repo.head.commit
#print headcommit.message
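# Minimal read-back sketch (assumes the same Solr core as above and pysolr's
# standard search() API): verify that the commits were actually indexed.
results = solr.search('*:*', rows=5)
for result in results:
    print result['committerName']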
|
nsalomonis/AltAnalyze
|
refs/heads/master
|
stats_scripts/mpmath/tests/test_str.py
|
10
|
from mpmath import nstr, matrix, inf
def test_nstr():
m = matrix([[0.75, 0.190940654, -0.0299195971],
[0.190940654, 0.65625, 0.205663228],
[-0.0299195971, 0.205663228, 0.64453125e-20]])
assert nstr(m, 4, min_fixed=-inf) == \
'''[ 0.75 0.1909 -0.02992]
[ 0.1909 0.6563 0.2057]
[-0.02992 0.2057 0.000000000000000000006445]'''
assert nstr(m, 4) == \
'''[ 0.75 0.1909 -0.02992]
[ 0.1909 0.6563 0.2057]
[-0.02992 0.2057 6.445e-21]'''
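# Illustrative aside (not part of the upstream test): nstr also formats
# scalars to n significant digits, e.g.
#
# >>> from mpmath import mpf
# >>> nstr(mpf(1)/3, 6)
# '0.333333'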
|
kurtisharms/ubcexamcram
|
refs/heads/master
|
gaepytz-2011h/pytz/__init__.py
|
52
|
'''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
# The Olson database is updated several times a year.
OLSON_VERSION = '2010h'
VERSION = OLSON_VERSION
# Version format for a patch release - only one so far.
#VERSION = OLSON_VERSION + '.2'
__version__ = OLSON_VERSION
OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling
__all__ = [
'timezone', 'utc', 'country_timezones', 'country_names',
'AmbiguousTimeError', 'InvalidTimeError',
'NonExistentTimeError', 'UnknownTimeZoneError',
'all_timezones', 'all_timezones_set',
'common_timezones', 'common_timezones_set',
'loader',
]
import sys, datetime, os.path, gettext
from UserDict import DictMixin
from UserList import UserList
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
from tzinfo import AmbiguousTimeError, InvalidTimeError, NonExistentTimeError
from tzinfo import unpickler
from tzfile import build_tzinfo
# Use 2.3 sets module implementation if set builtin is not available
try:
set
except NameError:
from sets import Set as set
class TimezoneLoader(object):
def __init__(self):
self.available = {}
def open_resource(self, name):
"""Open a resource from the zoneinfo subdir for reading.
        Uses the pkg_resources module if it is available and no standard
        file is found at the calculated location.
"""
name_parts = name.lstrip('/').split('/')
for part in name_parts:
if part == os.path.pardir or os.path.sep in part:
raise ValueError('Bad path segment: %r' % part)
filename = os.path.join(os.path.dirname(__file__),
'zoneinfo', *name_parts)
if not os.path.exists(filename) and resource_stream is not None:
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
return resource_stream(__name__, 'zoneinfo/' + name)
return open(filename, 'rb')
def resource_exists(self, name):
"""Return true if the given resource exists"""
if name not in self.available:
try:
self.open_resource(name)
self.available[name] = True
except IOError:
self.available[name] = False
return self.available[name]
loader = TimezoneLoader()
def open_resource(name):
return loader.open_resource(name)
def resource_exists(name):
return loader.resource_exists(name)
# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
# fallback=True
# )
# def _(timezone_name):
# """Translate a timezone name using the current locale, returning Unicode"""
# return t.ugettext(timezone_name)
class UnknownTimeZoneError(KeyError):
'''Exception raised when pytz is passed an unknown timezone.
>>> isinstance(UnknownTimeZoneError(), LookupError)
True
This class is actually a subclass of KeyError to provide backwards
compatibility with code relying on the undocumented behavior of earlier
pytz releases.
>>> isinstance(UnknownTimeZoneError(), KeyError)
True
'''
pass
_tzinfo_cache = {}
def timezone(zone):
r''' Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(u'US/Eastern') is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> timezone('Asia/Shangri-La')
Traceback (most recent call last):
...
UnknownTimeZoneError: 'Asia/Shangri-La'
>>> timezone(u'\N{TRADE MARK SIGN}')
Traceback (most recent call last):
...
UnknownTimeZoneError: u'\u2122'
'''
if zone.upper() == 'UTC':
return utc
try:
zone = zone.encode('US-ASCII')
except UnicodeEncodeError:
# All valid timezones are ASCII
raise UnknownTimeZoneError(zone)
zone = _unmunge_zone(zone)
if zone not in _tzinfo_cache:
if resource_exists(zone):
_tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone))
else:
raise UnknownTimeZoneError(zone)
return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
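# For example, zone names munged by an old pytz release round-trip as:
#
# >>> _unmunge_zone('Etc/GMT_plus_5')
# 'Etc/GMT+5'
# >>> _unmunge_zone('Etc/GMT_minus_5')
# 'Etc/GMT-5'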
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
"""UTC
Identical to the reference UTC implementation given in Python docs except
that it unpickles using the single module global instance defined beneath
this class declaration.
Also contains extra attributes and methods to match other pytz tzinfo
instances.
"""
zone = "UTC"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
def __reduce__(self):
return _UTC, ()
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError, 'Not naive datetime (tzinfo is already set)'
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is None:
raise ValueError, 'Naive time - no tzinfo set'
return dt.replace(tzinfo=self)
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
UTC = utc = UTC() # UTC is a singleton
def _UTC():
"""Factory function for utc unpickling.
Makes sure that unpickling a utc instance always returns the same
module global.
    These examples belong in the UTC class above, but it is obscured there;
    they could go in the README.txt instead, but we are not depending on
    Python 2.4, so integrating the README.txt examples with the unit tests
    is not trivial.
>>> import datetime, pickle
>>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
>>> naive = dt.replace(tzinfo=None)
>>> p = pickle.dumps(dt, 1)
>>> naive_p = pickle.dumps(naive, 1)
>>> len(p), len(naive_p), len(p) - len(naive_p)
(60, 43, 17)
>>> new = pickle.loads(p)
>>> new == dt
True
>>> new is dt
False
>>> new.tzinfo is dt.tzinfo
True
>>> utc is UTC is timezone('UTC')
True
>>> utc is timezone('GMT')
False
"""
return utc
_UTC.__safe_for_unpickling__ = True
def _p(*args):
"""Factory function for unpickling pytz tzinfo instances.
Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
by shortening the path.
"""
return unpickler(*args)
_p.__safe_for_unpickling__ = True
class _LazyDict(DictMixin):
"""Dictionary populated on first use."""
data = None
def __getitem__(self, key):
if self.data is None:
self._fill()
return self.data[key.upper()]
def keys(self):
if self.data is None:
self._fill()
return self.data.keys()
class _LazyList(UserList):
def __init__(self, func):
self._data = None
self._build = func
def data(self):
if self._data is None:
self._data = self._build()
return self._data
data = property(data)
class _CountryTimezoneDict(_LazyDict):
"""Map ISO 3166 country code to a list of timezone names commonly used
in that country.
iso3166_code is the two letter code used to identify the country.
>>> country_timezones['ch']
['Europe/Zurich']
>>> country_timezones['CH']
['Europe/Zurich']
>>> country_timezones[u'ch']
['Europe/Zurich']
>>> country_timezones['XXX']
Traceback (most recent call last):
...
KeyError: 'XXX'
Previously, this information was exposed as a function rather than a
dictionary. This is still supported::
>>> country_timezones('nz')
['Pacific/Auckland', 'Pacific/Chatham']
"""
def __call__(self, iso3166_code):
"""Backwards compatibility."""
return self[iso3166_code]
def _fill(self):
data = {}
zone_tab = open_resource('zone.tab')
for line in zone_tab:
if line.startswith('#'):
continue
code, coordinates, zone = line.split(None, 4)[:3]
if not resource_exists(zone):
continue
try:
data[code].append(zone)
except KeyError:
data[code] = [zone]
self.data = data
country_timezones = _CountryTimezoneDict()
class _CountryNameDict(_LazyDict):
    '''Dictionary providing ISO3166 code -> English name.
>>> country_names['au']
'Australia'
'''
def _fill(self):
data = {}
zone_tab = open_resource('iso3166.tab')
for line in zone_tab.readlines():
if line.startswith('#'):
continue
code, name = line.split(None, 1)
data[code] = name.strip()
self.data = data
country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
zone = None # to match the standard pytz API
def __init__(self, minutes):
if abs(minutes) >= 1440:
raise ValueError("absolute offset is too large", minutes)
self._minutes = minutes
self._offset = datetime.timedelta(minutes=minutes)
def utcoffset(self, dt):
return self._offset
def __reduce__(self):
return FixedOffset, (self._minutes, )
def dst(self, dt):
return None
def tzname(self, dt):
return None
def __repr__(self):
return 'pytz.FixedOffset(%d)' % self._minutes
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError, 'Not naive datetime (tzinfo is already set)'
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is None:
raise ValueError, 'Naive time - no tzinfo set'
return dt.replace(tzinfo=self)
def FixedOffset(offset, _tzinfos = {}):
"""return a fixed-offset timezone based off a number of minutes.
>>> one = FixedOffset(-330)
>>> one
pytz.FixedOffset(-330)
>>> one.utcoffset(datetime.datetime.now())
datetime.timedelta(-1, 66600)
>>> two = FixedOffset(1380)
>>> two
pytz.FixedOffset(1380)
>>> two.utcoffset(datetime.datetime.now())
datetime.timedelta(0, 82800)
    The corresponding datetime.timedelta must be strictly between
    -1 and 1 day.
>>> FixedOffset(1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', 1440)
>>> FixedOffset(-1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', -1440)
An offset of 0 is special-cased to return UTC.
>>> FixedOffset(0) is UTC
True
There should always be only one instance of a FixedOffset per timedelta.
This should be true for multiple creation calls.
>>> FixedOffset(-330) is one
True
>>> FixedOffset(1380) is two
True
It should also be true for pickling.
>>> import pickle
>>> pickle.loads(pickle.dumps(one)) is one
True
>>> pickle.loads(pickle.dumps(two)) is two
True
"""
if offset == 0:
return UTC
info = _tzinfos.get(offset)
if info is None:
        # We haven't seen this offset before, so we need to save it.
        # Use setdefault to avoid a race condition and to make sure we
        # keep only one instance per offset.
info = _tzinfos.setdefault(offset, _FixedOffset(offset))
return info
FixedOffset.__safe_for_unpickling__ = True
def _test():
import doctest, os, sys
sys.path.insert(0, os.pardir)
import pytz
return doctest.testmod(pytz)
if __name__ == '__main__':
_test()
all_timezones_unfiltered = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Chatham',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
all_timezones = _LazyList(
lambda: filter(resource_exists, all_timezones_unfiltered)
)
all_timezones_set = set(all_timezones_unfiltered) # XXX
common_timezones_unfiltered = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Martinique',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Vostok',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Helsinki',
'Europe/Istanbul',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/Simferopol',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Chatham',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
common_timezones = _LazyList(
lambda: filter(resource_exists, common_timezones_unfiltered)
)
common_timezones_set = set(common_timezones_unfiltered) # XXX
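# Quick usage sketch (results assume the bundled zoneinfo data is present):
#
# >>> import pytz
# >>> 'Europe/London' in pytz.common_timezones_set
# True
# >>> pytz.country_timezones('nz')
# ['Pacific/Auckland', 'Pacific/Chatham']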
|
Micronaet/micronaet-product
|
refs/heads/master
|
price_rule_and_force/rule.py
|
1
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class ProductProductPriceRule(orm.Model):
""" Model name: ProductProductPriceRule
"""
_name = 'product.product.price.rule'
_description = 'Price rule'
_order = 'name'
# Utility:
def product_ids_from_mask(self, cr, uid, mask, context=None):
        ''' Get the list of product ids whose default_code matches the mask
        '''
        # Bind the mask as a query parameter rather than interpolating it
        # into the SQL, avoiding quoting problems and SQL injection:
        cr.execute('''
            SELECT DISTINCT id
            FROM product_product
            WHERE default_code ILIKE %s
            ''', (mask, ))
return [item[0] for item in cr.fetchall()]
def force_product_list(self, cr, uid, ids, context=None):
        ''' Force the whole list: associate price and rule with every
            matching product
        '''
product_pool = self.pool.get('product.product')
_logger.info('Start generating rule for price')
rule_ids = self.search(cr, uid, [], context=context)
# TODO reset all rules before?
for rule in sorted(self.browse(cr, uid, rule_ids, context=context),
key=lambda r: (len(r.name), r.name)):
product_ids = self.product_ids_from_mask(
cr, uid, rule.name, context=context)
if not product_ids:
continue
_logger.info('Update product %s (mask: %s)' % (
len(product_ids),
rule.name,
))
product_pool.write(cr, uid, product_ids, {
'lst_price': rule.price,
'price_rule_id': rule.id,
}, context=context)
return True
def get_product_list(self, cr, uid, ids, context=None):
        ''' Show the products that match this rule's mask
        '''
rule_proxy = self.browse(cr, uid, ids, context=context)[0]
product_ids = self.product_ids_from_mask(
cr, uid, rule_proxy.name, context=context)
if not product_ids:
raise osv.except_osv(
_('Error!'), _('No product with mask selected!'), )
model_pool = self.pool.get('ir.model.data')
tree_id = model_pool.get_object_reference(
cr, uid,
'price_rule_and_force', 'view_product_product_add_price_rule_tree'
)[1]
return {
'type': 'ir.actions.act_window',
'name': _('Product with rule'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.product',
'view_id': tree_id,
'views': [(tree_id, 'tree'), (False, 'form')],
'domain': [('id', 'in', product_ids)],
'context': context,
'target': 'current',
'nodestroy': False,
}
_columns = {
'name': fields.char(
'Rule mask', size=64, required=True,
            help='''Product code mask: _ matches any single character and
                % matches any sequence of characters, e.g. the mask
                127_X% matches 127OX1, 127AX34, etc.
                '''),
'price': fields.float('Price', digits=(16, 3), required=True),
}
class ProductProduct(orm.Model):
""" Model name: ProductProduct
"""
_inherit = 'product.product'
_columns = {
'price_rule_id': fields.many2one(
'product.product.price.rule', 'Price rule'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
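# Illustrative note (hypothetical codes): a rule whose mask is '127_X%' is
# passed straight to SQL ILIKE, where _ matches exactly one character and
# % matches any run of characters, so products coded 127OX1 and 127AX34
# both match and receive the rule's price when force_product_list runs.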
|
CAAD-RWTH/ClockworkForDynamo
|
refs/heads/master
|
nodes/1.x/python/TextElement.SetText.py
|
4
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
def SetText(item, text):
	if hasattr(item, "Text"):
		item.Text = text
		return True
	else:
		return False
doc = DocumentManager.Instance.CurrentDBDocument
items = UnwrapElement(IN[0])
TransactionManager.Instance.EnsureInTransaction(doc)
if isinstance(IN[0], list):
if isinstance(IN[1], list): OUT = [SetText(x, y) for x, y in zip(items, IN[1])]
else: OUT = [SetText(x, IN[1]) for x in items]
else:
if isinstance(IN[1], list): OUT = False
else: OUT = SetText(items, IN[1])
TransactionManager.Instance.TransactionTaskDone()
|
AmberJBlue/aima-python
|
refs/heads/master
|
submissions/Porter/myCSPs.py
|
38
|
import csp
rgb = ['R', 'G', 'B']
domains = {
'AM': rgb,
'ES': rgb,
'LK': rgb,
'RB': rgb,
'FL': rgb,
'G': rgb,
'S': rgb,
'M': rgb,
'BL': rgb,
'C': rgb,
'H': rgb
}
variables = domains.keys()
neighbors = {
'AM': ['LK', 'ES'],
'ES': ['BL', 'M'],
'LK': ['RB', 'FL', 'AM'],
'RB': ['LK', 'FL', 'H'],
'FL': ['G', 'LK', 'RB'],
'G': ['FL', 'S'],
'S': ['G', 'M'],
'M': ['ES', 'BL', 'S'],
'BL': ['ES', 'C', 'M'],
'C': ['BL', 'H'],
'H': ['C', 'RB']
}
def constraints(A, a, B, b):
    if A == B:  # a region compared with itself is always consistent
        return True
    if a == b:  # two neighboring regions may not share a color
        return False
return True
myAus = csp.CSP(variables, domains, neighbors, constraints)
myCSPs = [
{'csp': myAus,
# 'select_unassigned_variable':csp.mrv,
}
]
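# Hedged usage sketch: assuming aima-python's csp.backtracking_search API,
# one valid 3-coloring of the map above can be computed with:
if __name__ == '__main__':
    solution = csp.backtracking_search(myAus)
    print(solution)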
|
alangwansui/mtl_ordercenter
|
refs/heads/master
|
openerp/addons/point_of_sale/wizard/pos_return.py
|
55
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import netsvc
from openerp.osv import osv,fields
from openerp.tools.translate import _
import time
class pos_return(osv.osv_memory):
_name = 'pos.return'
_description = 'Point of sale return'
_columns = {
'pos_moves_ids' : fields.one2many('pos.return.memory', 'pos_moves_id', 'Moves'),
}
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
        @return: A dictionary of fields with their default values.
"""
res = super(pos_return, self).default_get(cr, uid, fields, context=context)
order_obj = self.pool.get('pos.order')
if context is None:
context={}
active_ids = context.get('active_ids')
result=[]
for order in order_obj.browse(cr, uid, active_ids, context=context):
for line in order.lines:
result.append({
'product_id' : line.product_id.id,
'quantity' : line.qty,
'line_id':line.id
})
res.update({'pos_moves_ids': result})
return res
def create_returns(self, cr, uid, data, context=None):
"""
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
        @return: Return the add-product form again so more products can be added
"""
if context is None:
context = {}
current_rec = self.browse(cr, uid, data, context=context)[0]
order_obj =self.pool.get('pos.order')
line_obj = self.pool.get('pos.order.line')
pos_current = order_obj.browse(cr, uid, context.get('active_id'), context=context)
for pos_line in pos_current.lines:
for record in current_rec.pos_moves_ids:
if pos_line.id == record.line_id:
less_qty = record.quantity
line_obj.write(cr, uid, pos_line.id, {'qty':pos_line.qty - less_qty}, context=context)
return {
'name': _('Add Product'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.add.product',
'view_id': False,
'target':'new',
'views': False,
'context': context,
'type': 'ir.actions.act_window',
}
def create_returns2(self, cr, uid, ids, context=None):
if context is None:
context = {}
active_id = context.get('active_id', False)
order_obj =self.pool.get('pos.order')
line_obj = self.pool.get('pos.order.line')
picking_obj = self.pool.get('stock.picking')
stock_move_obj = self.pool.get('stock.move')
property_obj= self.pool.get("ir.property")
        uom_obj = self.pool.get('product.uom')
statementl_obj = self.pool.get('account.bank.statement.line')
wf_service = netsvc.LocalService("workflow")
        # TODO: clean up this code
if active_id:
data = self.browse(cr, uid, ids, context=context)[0]
date_cur = time.strftime('%Y-%m-%d %H:%M:%S')
for order_id in order_obj.browse(cr, uid, [active_id], context=context):
source_stock_id = property_obj.get(cr, uid, 'property_stock_customer', 'res.partner', context=context).id
cr.execute("SELECT s.id FROM stock_location s, stock_warehouse w "
"WHERE w.lot_stock_id=s.id AND w.id=%s ",
(order_id.shop_id.warehouse_id.id,))
res = cr.fetchone()
location_id = res and res[0] or None
new_picking = picking_obj.copy(cr, uid, order_id.picking_id.id, {'name':'%s (return)' % order_id.name,
'move_lines': [],
'state':'draft',
'type': 'in',
'partner_id': order_id.partner_id.id,
'date': date_cur })
new_order = order_obj.copy(cr, uid, order_id.id, {'name': 'Refund %s'%order_id.name,
'lines':[],
'statement_ids':[],
'picking_id':[]})
account_def = property_obj.get(cr, uid, 'property_account_payable', 'res.partner', context=context)
amount = 0.0
for line in order_id.lines:
for record in data.pos_moves_ids:
if line.id == record.line_id:
qty = record.quantity
amount += qty * line.price_unit
stock_move_obj.create(cr, uid, {
'product_qty': qty ,
'product_uos_qty': uom_obj._compute_qty(cr, uid, qty ,line.product_id.uom_id.id),
'picking_id': new_picking,
'product_uom': line.product_id.uom_id.id,
'location_id': source_stock_id,
'product_id': line.product_id.id,
'location_dest_id': location_id,
'name': '%s (return)' %order_id.name,
'date': date_cur
})
if qty != 0.0:
line_obj.copy(cr, uid, line.id, {'qty': -qty, 'order_id': new_order})
statementl_obj.create(cr, uid, {
'name': 'Refund %s'%order_id.name,
'statement_id': order_id.statement_ids[0].statement_id.id,
'pos_statement_id': new_order,
'date': fields.date.context_today(self, cr, uid, context=context),
'account_id': order_id.partner_id and order_id.partner_id.property_account_payable \
and order_id.partner_id.property_account_payable.id or account_def.id,
'amount': -amount,
})
order_obj.write(cr,uid, [active_id,new_order], {'state': 'done'})
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
picking_obj.force_assign(cr, uid, [new_picking], context)
act = {
'domain': "[('id', 'in', ["+str(new_order)+"])]",
'name': 'Refunded Orders',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'pos.order',
'auto_refresh':0,
'res_id':new_order,
'view_id': False,
'context':context,
'type': 'ir.actions.act_window'
}
return act
pos_return()
class add_product(osv.osv_memory):
_inherit = 'pos.add.product'
def select_product(self, cr, uid, ids, context=None):
"""
        Get the product and quantity and add them to the order.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
        @return: Return the add-product form again so more products can be added
"""
if context is None:
context = {}
active_id=context.get('active_id', False)
data = self.read(cr, uid, ids)
data = data and data[0] or False
if active_id:
order_obj = self.pool.get('pos.order')
picking_obj = self.pool.get('stock.picking')
stock_move_obj = self.pool.get('stock.move')
property_obj= self.pool.get("ir.property")
date_cur=time.strftime('%Y-%m-%d')
uom_obj = self.pool.get('product.uom')
prod_obj=self.pool.get('product.product')
wf_service = netsvc.LocalService("workflow")
order_obj.add_product(cr, uid, active_id, data['product_id'], data['quantity'], context=context)
for order_id in order_obj.browse(cr, uid, [active_id], context=context):
prod=data['product_id']
qty=data['quantity']
stock_dest_id = property_obj.get(cr, uid, 'property_stock_customer', 'res.partner', context=context).id
cr.execute("SELECT s.id FROM stock_location s, stock_warehouse w "
"WHERE w.lot_stock_id=s.id AND w.id=%s ",
(order_id.shop_id.warehouse_id.id,))
res=cr.fetchone()
location_id=res and res[0] or None
prod_id=prod_obj.browse(cr, uid, prod, context=context)
new_picking=picking_obj.create(cr, uid, {
'name':'%s (Added)' %order_id.name,
'move_lines':[],
'state':'draft',
'type':'out',
'date':date_cur
})
stock_move_obj.create(cr, uid, {
'product_qty': qty,
'product_uos_qty': uom_obj._compute_qty(cr, uid, prod_id.uom_id.id, qty, prod_id.uom_id.id),
'picking_id':new_picking,
'product_uom':prod_id.uom_id.id,
'location_id':location_id,
'product_id':prod_id.id,
'location_dest_id':stock_dest_id,
'name':'%s (return)' %order_id.name,
'date':date_cur
})
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
picking_obj.force_assign(cr, uid, [new_picking], context)
order_obj.write(cr,uid,active_id,{'picking_id':new_picking})
return {
'name': _('Add Product'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.add.product',
'view_id': False,
'target':'new',
'context':context,
'views': False,
'type': 'ir.actions.act_window',
}
def close_action(self, cr, uid, ids, context=None):
if context is None: context = {}
active_ids=context.get('active_ids', False)
order_obj = self.pool.get('pos.order')
lines_obj = self.pool.get('pos.order.line')
picking_obj = self.pool.get('stock.picking')
stock_move_obj = self.pool.get('stock.move')
property_obj= self.pool.get("ir.property")
invoice_obj=self.pool.get('account.invoice')
date_cur=time.strftime('%Y-%m-%d %H:%M:%S')
uom_obj = self.pool.get('product.uom')
        return_obj = self.pool.get('pos.return')
        return_id = return_obj.search(cr, uid, [])
        data = {}
        if return_id:
            data = return_obj.read(cr, uid, return_id, [])[0]
wf_service = netsvc.LocalService("workflow")
self_data = self.browse(cr, uid, ids, context=context)[0]
order_obj.add_product(cr, uid, active_ids[0], self_data.product_id.id, self_data.quantity, context=context)
for order_id in order_obj.browse(cr, uid, active_ids, context=context):
stock_dest_id = property_obj.get(cr, uid, 'property_stock_customer', 'res.partner', context=context).id
cr.execute("SELECT s.id FROM stock_location s, stock_warehouse w "
" WHERE w.lot_stock_id=s.id AND w.id=%s ",
(order_id.shop_id.warehouse_id.id,))
res=cr.fetchone()
location_id=res and res[0] or None
if order_id.invoice_id:
invoice_obj.refund(cr, uid, [order_id.invoice_id.id], time.strftime('%Y-%m-%d'), False, order_id.name, context=context)
new_picking=picking_obj.create(cr, uid, {
'name':'%s (return)' %order_id.name,
'move_lines':[], 'state':'draft',
'type':'in',
'date':date_cur
})
for line in order_id.lines:
                key = 'return%s' % line.id
                if line.id:
                    if key in data:
qty = data[key]
lines_obj.write(cr,uid,[line.id], {
'qty':line.qty-(data[key] or 0.0)
})
else:
qty = line.qty
stock_move_obj.create(cr, uid, {
'product_qty': qty,
'product_uos_qty': uom_obj._compute_qty(cr, uid, qty, line.product_id.uom_id.id),
'picking_id':new_picking,
'product_uom':line.product_id.uom_id.id,
'location_id':location_id,
'product_id':line.product_id.id,
'location_dest_id':stock_dest_id,
'name':'%s (return)' % order_id.name,
'date':date_cur,
})
wf_service.trg_validate(uid, 'stock.picking',new_picking,'button_confirm', cr)
picking_obj.force_assign(cr, uid, [new_picking], context)
obj=order_obj.browse(cr,uid, active_ids[0])
context.update({'return':'return'})
if obj.amount_total != obj.amount_paid:
return {
'name': _('Make Payment'),
'context ':context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.make.payment',
'view_id': False,
'target': 'new',
'views': False,
'type': 'ir.actions.act_window',
}
return True
add_product()
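
# Hedged sketch, not part of the original wizard: the raw SQL used above to
# find a warehouse's stock location can also be expressed through the ORM,
# assuming standard OpenERP 6.x models. `pool` stands for the registry that
# callers of this module already hold; the helper is illustrative only.
def _stock_location_for_warehouse(pool, cr, uid, warehouse_id, context=None):
    # stock.warehouse.lot_stock_id points at the same stock.location row that
    # the SELECT over stock_location/stock_warehouse joins to.
    warehouse = pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
    return warehouse.lot_stock_id.id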
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
deKupini/erp
|
refs/heads/master
|
openerp/cli/__init__.py
|
58
|
import logging
import sys
import os
import openerp
from command import Command, main
import deploy
import scaffold
import server
import shell
import start
|
PhilReinhold/pyqt_utils
|
refs/heads/master
|
settings_window.py
|
1
|
from PyQt4.QtCore import QObject, pyqtSignal, QThread, Qt
from PyQt4.QtGui import QApplication, QMainWindow
import sys
def run(widget_class, settings=None, ui_version=1, **kwargs):
app = QApplication.instance()
if app is None:
app = QApplication([])
if settings is not None:
window = SettingsWindow(settings, ui_version)
window.setCentralWidget(widget_class(**kwargs))
window.setWindowTitle(window.centralWidget().windowTitle())
window.restore_from_settings()
else:
window = widget_class(**kwargs)
window.show()
app.setQuitOnLastWindowClosed(True)
sys.exit(app.exec_())
class SettingsWindow(QMainWindow):
def __init__(self, settings, ui_version):
super(SettingsWindow, self).__init__()
self.settings = settings
self.ui_version = ui_version
def restore_from_settings(self):
self.restoreGeometry(self.settings.value("geometry").toByteArray())
self.restoreState(self.settings.value("state").toByteArray(), self.ui_version)
def closeEvent(self, ev):
if self.settings is not None:
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("state", self.saveState(self.ui_version))
widget = self.centralWidget()
del widget
return super(SettingsWindow, self).closeEvent(ev)
class Worker(QObject):
finished = pyqtSignal(name="finished")
def __init__(self, *args):
super(Worker, self).__init__()
self.args = args
def start(self):
self.output = self.process(*self.args)
self.finished.emit()
def run_in_thread(fn, args):
worker = Worker(*args)
worker.process = fn
thread = QThread()
worker.moveToThread(thread)
app = QApplication.instance()
# Old-style connect() with Qt.SIGNAL was broken (SIGNAL lives in QtCore,
# not in the Qt namespace); new-style signal connections do the same wiring.
thread.started.connect(worker.start)
worker.finished.connect(thread.quit)
worker.finished.connect(thread.deleteLater)
return worker, thread
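
def _demo_run_in_thread():
    # Hedged usage sketch, not part of the original module: shows how
    # run_in_thread is meant to be wired up. `slow_double` is a hypothetical
    # payload; a live QApplication event loop (e.g. one started via run()
    # above) is required before thread.start() does anything useful.
    def slow_double(x):
        import time
        time.sleep(1)
        return 2 * x
    worker, thread = run_in_thread(slow_double, (21,))
    # worker.output holds the return value once `finished` fires.
    worker.finished.connect(lambda: sys.stdout.write('%r\n' % worker.output))
    thread.start()
    return worker, thread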
|
EmadMokhtar/Django
|
refs/heads/master
|
tests/admin_scripts/custom_templates/app_template/api.py
|
581
|
# your API code
|
XenGi/meckbd
|
refs/heads/master
|
firmware/tools/arm/arm-none-eabi/share/gdb/python/gdb/prompt.py
|
137
|
# Extended prompt utilities.
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Extended prompt library functions."""
import gdb
import os
def _prompt_pwd(ignore):
"The current working directory."
return os.getcwdu()
def _prompt_object_attr(func, what, attr, nattr):
"""Internal worker for fetching GDB attributes."""
if attr is None:
attr = nattr
try:
obj = func()
except gdb.error:
return '<no %s>' % what
if hasattr(obj, attr):
result = getattr(obj, attr)
if callable(result):
result = result()
return result
else:
return '<no attribute %s on current %s>' % (attr, what)
def _prompt_frame(attr):
"The selected frame; an argument names a frame parameter."
return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')
def _prompt_thread(attr):
"The selected thread; an argument names a thread parameter."
return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')
def _prompt_version(attr):
"The version of GDB."
return gdb.VERSION
def _prompt_esc(attr):
"The ESC character."
return '\033'
def _prompt_bs(attr):
"A backslash."
return '\\'
def _prompt_n(attr):
"A newline."
return '\n'
def _prompt_r(attr):
"A carriage return."
return '\r'
def _prompt_param(attr):
"A parameter's value; the argument names the parameter."
return gdb.parameter(attr)
def _prompt_noprint_begin(attr):
"Begins a sequence of non-printing characters."
return '\001'
def _prompt_noprint_end(attr):
"Ends a sequence of non-printing characters."
return '\002'
prompt_substitutions = {
'e': _prompt_esc,
'\\': _prompt_bs,
'n': _prompt_n,
'r': _prompt_r,
'v': _prompt_version,
'w': _prompt_pwd,
'f': _prompt_frame,
't': _prompt_thread,
'p': _prompt_param,
'[': _prompt_noprint_begin,
']': _prompt_noprint_end
}
def prompt_help():
"""Generate help dynamically from the __doc__ strings of attribute
functions."""
result = ''
keys = sorted(prompt_substitutions.keys())
for key in keys:
result += ' \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
result += """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
return result
def substitute_prompt(prompt):
"Perform substitutions on PROMPT."
result = ''
plen = len(prompt)
i = 0
while i < plen:
if prompt[i] == '\\':
i = i + 1
if i >= plen:
break
cmdch = prompt[i]
if cmdch in prompt_substitutions:
cmd = prompt_substitutions[cmdch]
if i + 1 < plen and prompt[i + 1] == '{':
j = i + 1
while j < plen and prompt[j] != '}':
j = j + 1
# Just ignore formatting errors.
if j >= plen or prompt[j] != '}':
arg = None
else:
arg = prompt[i + 2 : j]
i = j
else:
arg = None
result += str(cmd(arg))
else:
# Unrecognized escapes are turned into the escaped
# character itself.
result += prompt[i]
else:
result += prompt[i]
i = i + 1
return result
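
def _demo_substitute_prompt():
    # Hedged illustration of the escape grammar parsed above; only literal
    # escapes are exercised here because \v, \w, \f, \t and \p need a live
    # GDB session. Unknown escapes such as \q collapse to the bare character.
    return substitute_prompt('line1\\nline2 \\\\ \\q')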
|
1haodian/spark
|
refs/heads/master
|
python/pyspark/mllib/feature.py
|
52
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for feature in MLlib.
"""
from __future__ import absolute_import
import sys
import warnings
import random
import binascii
if sys.version >= '3':
basestring = str
unicode = str
from py4j.protocol import Py4JJavaError
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import (
Vector, Vectors, DenseVector, SparseVector, _convert_to_vector)
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['Normalizer', 'StandardScalerModel', 'StandardScaler',
'HashingTF', 'IDFModel', 'IDF', 'Word2Vec', 'Word2VecModel',
'ChiSqSelector', 'ChiSqSelectorModel', 'ElementwiseProduct']
class VectorTransformer(object):
"""
.. note:: DeveloperApi
Base class for transformation of a vector or an RDD of vectors.
"""
def transform(self, vector):
"""
Applies transformation on a vector.
:param vector: vector to be transformed.
"""
raise NotImplementedError
class Normalizer(VectorTransformer):
"""
Normalizes samples individually to unit L\ :sup:`p`\ norm
For any 1 <= `p` < float('inf'), normalizes samples using
sum(abs(vector) :sup:`p`) :sup:`(1/p)` as norm.
For `p` = float('inf'), max(abs(vector)) will be used as norm for
normalization.
:param p: Normalization in L^p^ space, p = 2 by default.
>>> v = Vectors.dense(range(3))
>>> nor = Normalizer(1)
>>> nor.transform(v)
DenseVector([0.0, 0.3333, 0.6667])
>>> rdd = sc.parallelize([v])
>>> nor.transform(rdd).collect()
[DenseVector([0.0, 0.3333, 0.6667])]
>>> nor2 = Normalizer(float("inf"))
>>> nor2.transform(v)
DenseVector([0.0, 0.5, 1.0])
.. versionadded:: 1.2.0
"""
def __init__(self, p=2.0):
assert p >= 1.0, "p should be greater than or equal to 1.0"
self.p = float(p)
@since('1.2.0')
def transform(self, vector):
"""
Applies unit length normalization on a vector.
:param vector: vector or RDD of vector to be normalized.
:return: normalized vector. If the norm of the input is zero, it
will return the input vector.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return callMLlibFunc("normalizeVector", self.p, vector)
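
def _lp_norm_sketch(xs, p):
    # Hedged, Spark-free restatement of the norm Normalizer divides by (a
    # local sketch for illustration, not used by the class above):
    # max(|x|) when p == inf, otherwise (sum(|x| ** p)) ** (1 / p).
    # For the doctest input [0, 1, 2] with p=1 this gives 3, hence
    # [0.0, 0.3333, 0.6667].
    if p == float('inf'):
        return max(abs(x) for x in xs)
    return sum(abs(x) ** p for x in xs) ** (1.0 / p)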
class JavaVectorTransformer(JavaModelWrapper, VectorTransformer):
"""
Wrapper for the model in JVM
"""
def transform(self, vector):
"""
Applies transformation on a vector or an RDD[Vector].
.. note:: In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
:param vector: Vector or RDD of Vector to be transformed.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return self.call("transform", vector)
class StandardScalerModel(JavaVectorTransformer):
"""
Represents a StandardScaler model that can transform vectors.
.. versionadded:: 1.2.0
"""
@since('1.2.0')
def transform(self, vector):
"""
Applies standardization transformation on a vector.
.. note:: In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
:param vector: Vector or RDD of Vector to be standardized.
:return: Standardized vector. If the variance of a column is
zero, it will return default `0.0` for the column with
zero variance.
"""
return JavaVectorTransformer.transform(self, vector)
@since('1.4.0')
def setWithMean(self, withMean):
"""
Sets whether the model centers the data
with the mean before scaling.
"""
self.call("setWithMean", withMean)
return self
@since('1.4.0')
def setWithStd(self, withStd):
"""
Sets whether the model scales the data
to unit standard deviation.
"""
self.call("setWithStd", withStd)
return self
@property
@since('2.0.0')
def withStd(self):
"""
Returns if the model scales the data to unit standard deviation.
"""
return self.call("withStd")
@property
@since('2.0.0')
def withMean(self):
"""
Returns if the model centers the data before scaling.
"""
return self.call("withMean")
@property
@since('2.0.0')
def std(self):
"""
Return the column standard deviation values.
"""
return self.call("std")
@property
@since('2.0.0')
def mean(self):
"""
Return the column mean values.
"""
return self.call("mean")
class StandardScaler(object):
"""
Standardizes features by removing the mean and scaling to unit
variance using column summary statistics on the samples in the
training set.
:param withMean: False by default. Centers the data with mean
before scaling. It will build a dense output, so take
care when applying to sparse input.
:param withStd: True by default. Scales the data to unit
standard deviation.
>>> vs = [Vectors.dense([-2.0, 2.3, 0]), Vectors.dense([3.8, 0.0, 1.9])]
>>> dataset = sc.parallelize(vs)
>>> standardizer = StandardScaler(True, True)
>>> model = standardizer.fit(dataset)
>>> result = model.transform(dataset)
>>> for r in result.collect(): r
DenseVector([-0.7071, 0.7071, -0.7071])
DenseVector([0.7071, -0.7071, 0.7071])
>>> int(model.std[0])
4
>>> int(model.mean[0]*10)
9
>>> model.withStd
True
>>> model.withMean
True
.. versionadded:: 1.2.0
"""
def __init__(self, withMean=False, withStd=True):
if not (withMean or withStd):
warnings.warn("Both withMean and withStd are false. The model does nothing.")
self.withMean = withMean
self.withStd = withStd
@since('1.2.0')
def fit(self, dataset):
"""
Computes the mean and variance and stores as a model to be used
for later scaling.
:param dataset: The data used to compute the mean and variance
to build the transformation model.
:return: a StandardScalerModel
"""
dataset = dataset.map(_convert_to_vector)
jmodel = callMLlibFunc("fitStandardScaler", self.withMean, self.withStd, dataset)
return StandardScalerModel(jmodel)
class ChiSqSelectorModel(JavaVectorTransformer):
"""
Represents a Chi Squared selector model.
.. versionadded:: 1.4.0
"""
@since('1.4.0')
def transform(self, vector):
"""
Applies transformation on a vector.
:param vector: Vector or RDD of Vector to be transformed.
:return: transformed vector.
"""
return JavaVectorTransformer.transform(self, vector)
class ChiSqSelector(object):
"""
Creates a ChiSquared feature selector.
The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
`fdr`, `fwe`.
* `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
* `percentile` is similar but chooses a fraction of all features
instead of a fixed number.
* `fpr` chooses all features whose p-values are below a threshold,
thus controlling the false positive rate of selection.
* `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/
False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
to choose all features whose false discovery rate is below a threshold.
* `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by
1/numFeatures, thus controlling the family-wise error rate of selection.
By default, the selection method is `numTopFeatures`, with the default number of top features
set to 50.
>>> data = sc.parallelize([
... LabeledPoint(0.0, SparseVector(3, {0: 8.0, 1: 7.0})),
... LabeledPoint(1.0, SparseVector(3, {1: 9.0, 2: 6.0})),
... LabeledPoint(1.0, [0.0, 9.0, 8.0]),
... LabeledPoint(2.0, [7.0, 9.0, 5.0]),
... LabeledPoint(2.0, [8.0, 7.0, 3.0])
... ])
>>> model = ChiSqSelector(numTopFeatures=1).fit(data)
>>> model.transform(SparseVector(3, {1: 9.0, 2: 6.0}))
SparseVector(1, {})
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
>>> model = ChiSqSelector(selectorType="fpr", fpr=0.2).fit(data)
>>> model.transform(SparseVector(3, {1: 9.0, 2: 6.0}))
SparseVector(1, {})
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
>>> model = ChiSqSelector(selectorType="percentile", percentile=0.34).fit(data)
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
.. versionadded:: 1.4.0
"""
def __init__(self, numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
fdr=0.05, fwe=0.05):
self.numTopFeatures = numTopFeatures
self.selectorType = selectorType
self.percentile = percentile
self.fpr = fpr
self.fdr = fdr
self.fwe = fwe
@since('2.1.0')
def setNumTopFeatures(self, numTopFeatures):
"""
set numTopFeatures for feature selection by number of top features.
Only applicable when selectorType = "numTopFeatures".
"""
self.numTopFeatures = int(numTopFeatures)
return self
@since('2.1.0')
def setPercentile(self, percentile):
"""
set percentile [0.0, 1.0] for feature selection by percentile.
Only applicable when selectorType = "percentile".
"""
self.percentile = float(percentile)
return self
@since('2.1.0')
def setFpr(self, fpr):
"""
set FPR [0.0, 1.0] for feature selection by FPR.
Only applicable when selectorType = "fpr".
"""
self.fpr = float(fpr)
return self
@since('2.2.0')
def setFdr(self, fdr):
"""
set FDR [0.0, 1.0] for feature selection by FDR.
Only applicable when selectorType = "fdr".
"""
self.fdr = float(fdr)
return self
@since('2.2.0')
def setFwe(self, fwe):
"""
set FWE [0.0, 1.0] for feature selection by FWE.
Only applicable when selectorType = "fwe".
"""
self.fwe = float(fwe)
return self
@since('2.1.0')
def setSelectorType(self, selectorType):
"""
set the selector type of the ChiSqSelector.
Supported options: "numTopFeatures" (default), "percentile", "fpr", "fdr", "fwe".
"""
self.selectorType = str(selectorType)
return self
@since('1.4.0')
def fit(self, data):
"""
Returns a ChiSquared feature selector.
:param data: an `RDD[LabeledPoint]` containing the labeled dataset
with categorical features. Real-valued features will be
treated as categorical for each distinct value.
Apply feature discretizer before using this function.
"""
jmodel = callMLlibFunc("fitChiSqSelector", self.selectorType, self.numTopFeatures,
self.percentile, self.fpr, self.fdr, self.fwe, data)
return ChiSqSelectorModel(jmodel)
class PCAModel(JavaVectorTransformer):
"""
Model fitted by [[PCA]] that can project vectors to a low-dimensional space using PCA.
.. versionadded:: 1.5.0
"""
class PCA(object):
"""
A feature transformer that projects vectors to a low-dimensional space using PCA.
>>> data = [Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),
... Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),
... Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0])]
>>> model = PCA(2).fit(sc.parallelize(data))
>>> pcArray = model.transform(Vectors.sparse(5, [(1, 1.0), (3, 7.0)])).toArray()
>>> pcArray[0]
1.648...
>>> pcArray[1]
-4.013...
.. versionadded:: 1.5.0
"""
def __init__(self, k):
"""
:param k: number of principal components.
"""
self.k = int(k)
@since('1.5.0')
def fit(self, data):
"""
Computes a [[PCAModel]] that contains the principal components of the input vectors.
:param data: source vectors
"""
jmodel = callMLlibFunc("fitPCA", self.k, data)
return PCAModel(jmodel)
class HashingTF(object):
"""
Maps a sequence of terms to their term frequencies using the hashing
trick.
.. note:: The terms must be hashable (can not be dict/set/list...).
:param numFeatures: number of features (default: 2^20)
>>> htf = HashingTF(100)
>>> doc = "a a b b c d".split(" ")
>>> htf.transform(doc)
SparseVector(100, {...})
.. versionadded:: 1.2.0
"""
def __init__(self, numFeatures=1 << 20):
self.numFeatures = numFeatures
self.binary = False
@since("2.0.0")
def setBinary(self, value):
"""
If True, term frequency vector will be binary such that non-zero
term counts will be set to 1
(default: False)
"""
self.binary = value
return self
@since('1.2.0')
def indexOf(self, term):
""" Returns the index of the input term. """
return hash(term) % self.numFeatures
@since('1.2.0')
def transform(self, document):
"""
Transforms the input document (list of terms) to a term frequency
vector, or transforms an RDD of documents to an RDD of term
frequency vectors.
"""
if isinstance(document, RDD):
return document.map(self.transform)
freq = {}
for term in document:
i = self.indexOf(term)
freq[i] = 1.0 if self.binary else freq.get(i, 0) + 1.0
return Vectors.sparse(self.numFeatures, freq.items())
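
# Worked check of the HashingTF doctest above: "a a b b c d" maps six terms
# into the 100-feature space via hash(term) % 100, so the resulting sparse
# vector carries the counts {2.0, 2.0, 1.0, 1.0} (the exact indices depend on
# Python's hash(), and collisions could merge buckets).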
class IDFModel(JavaVectorTransformer):
"""
Represents an IDF model that can transform term frequency vectors.
.. versionadded:: 1.2.0
"""
@since('1.2.0')
def transform(self, x):
"""
Transforms term frequency (TF) vectors to TF-IDF vectors.
If `minDocFreq` was set for the IDF calculation,
the terms which occur in fewer than `minDocFreq`
documents will have an entry of 0.
.. note:: In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
:param x: an RDD of term frequency vectors or a term frequency
vector
:return: an RDD of TF-IDF vectors or a TF-IDF vector
"""
return JavaVectorTransformer.transform(self, x)
@since('1.4.0')
def idf(self):
"""
Returns the current IDF vector.
"""
return self.call('idf')
class IDF(object):
"""
Inverse document frequency (IDF).
The standard formulation is used: `idf = log((m + 1) / (d(t) + 1))`,
where `m` is the total number of documents and `d(t)` is the number
of documents that contain term `t`.
This implementation supports filtering out terms which do not appear
in a minimum number of documents (controlled by the variable
`minDocFreq`). For terms that are not in at least `minDocFreq`
documents, the IDF is found as 0, resulting in TF-IDFs of 0.
:param minDocFreq: minimum number of documents in which a term
  should appear for filtering
>>> n = 4
>>> freqs = [Vectors.sparse(n, (1, 3), (1.0, 2.0)),
... Vectors.dense([0.0, 1.0, 2.0, 3.0]),
... Vectors.sparse(n, [1], [1.0])]
>>> data = sc.parallelize(freqs)
>>> idf = IDF()
>>> model = idf.fit(data)
>>> tfidf = model.transform(data)
>>> for r in tfidf.collect(): r
SparseVector(4, {1: 0.0, 3: 0.5754})
DenseVector([0.0, 0.0, 1.3863, 0.863])
SparseVector(4, {1: 0.0})
>>> model.transform(Vectors.dense([0.0, 1.0, 2.0, 3.0]))
DenseVector([0.0, 0.0, 1.3863, 0.863])
>>> model.transform([0.0, 1.0, 2.0, 3.0])
DenseVector([0.0, 0.0, 1.3863, 0.863])
>>> model.transform(Vectors.sparse(n, (1, 3), (1.0, 2.0)))
SparseVector(4, {1: 0.0, 3: 0.5754})
.. versionadded:: 1.2.0
"""
def __init__(self, minDocFreq=0):
self.minDocFreq = minDocFreq
@since('1.2.0')
def fit(self, dataset):
"""
Computes the inverse document frequency.
:param dataset: an RDD of term frequency vectors
"""
if not isinstance(dataset, RDD):
raise TypeError("dataset should be an RDD of term frequency vectors")
jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector))
return IDFModel(jmodel)
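
# Worked check of the IDF doctest above: m = 3 documents and term 3 appears
# in d(t) = 2 of them, so idf = log((3 + 1) / (2 + 1)) = log(4/3) ~ 0.2877 and
# the TF-IDF entry is 2.0 * 0.2877 ~ 0.5754; term 1 appears in all three
# documents, giving idf = log(4/4) = 0 and hence the 0.0 entries.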
class Word2VecModel(JavaVectorTransformer, JavaSaveable, JavaLoader):
"""
class for Word2Vec model
.. versionadded:: 1.2.0
"""
@since('1.2.0')
def transform(self, word):
"""
Transforms a word to its vector representation
.. note:: Local use only
:param word: a word
:return: vector representation of word(s)
"""
try:
return self.call("transform", word)
except Py4JJavaError:
raise ValueError("%s not found" % word)
@since('1.2.0')
def findSynonyms(self, word, num):
"""
Find synonyms of a word
:param word: a word or a vector representation of word
:param num: number of synonyms to find
:return: array of (word, cosineSimilarity)
.. note:: Local use only
"""
if not isinstance(word, basestring):
word = _convert_to_vector(word)
words, similarity = self.call("findSynonyms", word, num)
return zip(words, similarity)
@since('1.4.0')
def getVectors(self):
"""
Returns a map of words to their vector representations.
"""
return self.call("getVectors")
@classmethod
@since('1.5.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
jmodel = sc._jvm.org.apache.spark.mllib.feature \
.Word2VecModel.load(sc._jsc.sc(), path)
model = sc._jvm.org.apache.spark.mllib.api.python.Word2VecModelWrapper(jmodel)
return Word2VecModel(model)
@ignore_unicode_prefix
class Word2Vec(object):
"""Word2Vec creates vector representation of words in a text corpus.
The algorithm first constructs a vocabulary from the corpus
and then learns vector representation of words in the vocabulary.
The vector representation can be used as features in
natural language processing and machine learning algorithms.
We use the skip-gram model in our implementation, with the hierarchical
softmax method to train it. The variable names in the
implementation match the original C implementation.
For original C implementation,
see https://code.google.com/p/word2vec/
For research papers, see
Efficient Estimation of Word Representations in Vector Space
and Distributed Representations of Words and Phrases and their
Compositionality.
>>> sentence = "a b " * 100 + "a c " * 10
>>> localDoc = [sentence, sentence]
>>> doc = sc.parallelize(localDoc).map(lambda line: line.split(" "))
>>> model = Word2Vec().setVectorSize(10).setSeed(42).fit(doc)
Querying for synonyms of a word will not return that word:
>>> syms = model.findSynonyms("a", 2)
>>> [s[0] for s in syms]
[u'b', u'c']
But querying for synonyms of a vector may return the word whose
representation is that vector:
>>> vec = model.transform("a")
>>> syms = model.findSynonyms(vec, 2)
>>> [s[0] for s in syms]
[u'a', u'b']
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = Word2VecModel.load(sc, path)
>>> model.transform("a") == sameModel.transform("a")
True
>>> syms = sameModel.findSynonyms("a", 2)
>>> [s[0] for s in syms]
[u'b', u'c']
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
.. versionadded:: 1.2.0
"""
def __init__(self):
"""
Construct Word2Vec instance
"""
self.vectorSize = 100
self.learningRate = 0.025
self.numPartitions = 1
self.numIterations = 1
self.seed = None
self.minCount = 5
self.windowSize = 5
@since('1.2.0')
def setVectorSize(self, vectorSize):
"""
Sets vector size (default: 100).
"""
self.vectorSize = vectorSize
return self
@since('1.2.0')
def setLearningRate(self, learningRate):
"""
Sets initial learning rate (default: 0.025).
"""
self.learningRate = learningRate
return self
@since('1.2.0')
def setNumPartitions(self, numPartitions):
"""
Sets number of partitions (default: 1). Use a small number for
accuracy.
"""
self.numPartitions = numPartitions
return self
@since('1.2.0')
def setNumIterations(self, numIterations):
"""
Sets number of iterations (default: 1), which should be smaller
than or equal to number of partitions.
"""
self.numIterations = numIterations
return self
@since('1.2.0')
def setSeed(self, seed):
"""
Sets random seed.
"""
self.seed = seed
return self
@since('1.4.0')
def setMinCount(self, minCount):
"""
Sets minCount, the minimum number of times a token must appear
to be included in the word2vec model's vocabulary (default: 5).
"""
self.minCount = minCount
return self
@since('2.0.0')
def setWindowSize(self, windowSize):
"""
Sets window size (default: 5).
"""
self.windowSize = windowSize
return self
@since('1.2.0')
def fit(self, data):
"""
Computes the vector representation of each word in vocabulary.
:param data: training data. RDD of list of string
:return: Word2VecModel instance
"""
if not isinstance(data, RDD):
raise TypeError("data should be an RDD of list of string")
jmodel = callMLlibFunc("trainWord2VecModel", data, int(self.vectorSize),
float(self.learningRate), int(self.numPartitions),
int(self.numIterations), self.seed,
int(self.minCount), int(self.windowSize))
return Word2VecModel(jmodel)
class ElementwiseProduct(VectorTransformer):
"""
Scales each column of the vector with the supplied weight vector,
i.e. the elementwise product.
>>> weight = Vectors.dense([1.0, 2.0, 3.0])
>>> eprod = ElementwiseProduct(weight)
>>> a = Vectors.dense([2.0, 1.0, 3.0])
>>> eprod.transform(a)
DenseVector([2.0, 2.0, 9.0])
>>> b = Vectors.dense([9.0, 3.0, 4.0])
>>> rdd = sc.parallelize([a, b])
>>> eprod.transform(rdd).collect()
[DenseVector([2.0, 2.0, 9.0]), DenseVector([9.0, 6.0, 12.0])]
.. versionadded:: 1.5.0
"""
def __init__(self, scalingVector):
self.scalingVector = _convert_to_vector(scalingVector)
@since('1.5.0')
def transform(self, vector):
"""
Computes the Hadamard product of the vector.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return callMLlibFunc("elementwiseProductVector", self.scalingVector, vector)
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.feature tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
sys.path.pop(0)
_test()
|
kirillzhuravlev/numpy
|
refs/heads/master
|
numpy/f2py/setup.py
|
158
|
#!/usr/bin/env python
"""
setup.py for installing F2PY
Usage:
python setup.py install
Copyright 2001-2005 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.32 $
$Date: 2005/01/30 17:22:14 $
Pearu Peterson
"""
from __future__ import division, print_function
__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $"
import os
import sys
from distutils.dep_util import newer
from numpy.distutils import log
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from __version__ import version
def _get_f2py_shebang():
""" Return shebang line for f2py script
If we are building a binary distribution format, then the shebang line
should be ``#!python`` rather than ``#!`` followed by the contents of
``sys.executable``.
"""
if set(('bdist_wheel', 'bdist_egg', 'bdist_wininst',
'bdist_rpm')).intersection(sys.argv):
return '#!python'
return '#!' + sys.executable
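
# For example (illustration only): "python setup.py bdist_wheel" gets the
# portable "#!python" shebang, while "python setup.py install" embeds the
# absolute path of the running interpreter.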
def configuration(parent_package='', top_path=None):
config = Configuration('f2py', parent_package, top_path)
config.add_data_dir('tests')
config.add_data_files('src/fortranobject.c',
'src/fortranobject.h',
)
config.make_svn_version_py()
def generate_f2py_py(build_dir):
f2py_exe = 'f2py' + os.path.basename(sys.executable)[6:]
if f2py_exe[-4:] == '.exe':
f2py_exe = f2py_exe[:-4] + '.py'
if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py':
f2py_exe = f2py_exe + '.py'
target = os.path.join(build_dir, f2py_exe)
if newer(__file__, target):
log.info('Creating %s', target)
f = open(target, 'w')
f.write(_get_f2py_shebang() + '\n')
mainloc = os.path.join(os.path.dirname(__file__), "__main__.py")
with open(mainloc) as mf:
f.write(mf.read())
f.close()
return target
config.add_scripts(generate_f2py_py)
log.info('F2PY Version %s', config.get_version())
return config
if __name__ == "__main__":
config = configuration(top_path='')
print('F2PY Version', version)
config = config.todict()
config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\
"/F2PY-2-latest.tar.gz"
config['classifiers'] = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: NumPy License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: C',
'Programming Language :: Fortran',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Code Generators',
]
setup(version=version,
description="F2PY - Fortran to Python Interface Generaton",
author="Pearu Peterson",
author_email="pearu@cens.ioc.ee",
maintainer="Pearu Peterson",
maintainer_email="pearu@cens.ioc.ee",
license="BSD",
platforms="Unix, Windows (mingw|cygwin), Mac OSX",
long_description="""\
The Fortran to Python Interface Generator, or F2PY for short, is a
command line tool (f2py) for generating Python C/API modules for
wrapping Fortran 77/90/95 subroutines, accessing common blocks from
Python, and calling Python functions from Fortran (call-backs).
Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
url="http://cens.ioc.ee/projects/f2py2e/",
keywords=['Fortran', 'f2py'],
**config)
|
fingeronthebutton/robotframework
|
refs/heads/master
|
atest/testdata/test_libraries/PrintLib.py
|
16
|
import sys
def print_one_html_line():
print '*HTML* <a href="http://www.google.com">Google</a>'
def print_many_html_lines():
print '*HTML* <table border=1>\n<tr><td>0,0</td><td>0,1</td></tr>'
print '<tr><td>1,0</td><td>1,1</td></tr>\n</table>'
print '*HTML*This is html <hr>'
print '*INFO*This is not html <br>'
def print_html_to_stderr():
print >> sys.stderr, '*HTML* <i>Hello, stderr!!</i>'
def print_with_all_levels():
for level in 'TRACE DEBUG INFO HTML WARN ERROR'.split():
print '*%s* %s message' % (level, level.title())
|
anistark/mozillians
|
refs/heads/master
|
vendor-local/lib/python/celery/apps/worker.py
|
10
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import atexit
import logging
try:
import multiprocessing
except ImportError:
multiprocessing = None # noqa
import os
import socket
import sys
import warnings
from .. import __version__, platforms, signals
from ..app import app_or_default
from ..app.abstract import configurated, from_config
from ..exceptions import ImproperlyConfigured, SystemTerminate
from ..utils import cry, isatty, LOG_LEVELS, pluralize, qualname
from ..worker import WorkController
try:
from greenlet import GreenletExit
IGNORE_ERRORS = (GreenletExit, )
except ImportError:
IGNORE_ERRORS = ()
BANNER = """
-------------- celery@%(hostname)s v%(version)s
---- **** -----
--- * *** * -- [Configuration]
-- * - **** --- . broker: %(conninfo)s
- ** ---------- . loader: %(loader)s
- ** ---------- . logfile: %(logfile)s@%(loglevel)s
- ** ---------- . concurrency: %(concurrency)s
- ** ---------- . events: %(events)s
- *** --- * --- . beat: %(celerybeat)s
-- ******* ----
--- ***** ----- [Queues]
-------------- %(queues)s
"""
EXTRA_INFO_FMT = """
[Tasks]
%(tasks)s
"""
UNKNOWN_QUEUE_ERROR = """\
Trying to select queue subset of %r, but queue %s is not
defined in the CELERY_QUEUES setting.
If you want to automatically declare unknown queues you can
enable the CELERY_CREATE_MISSING_QUEUES setting.
"""
def cpu_count():
if multiprocessing is not None:
try:
return multiprocessing.cpu_count()
except NotImplementedError:
pass
return 2
def get_process_name():
if multiprocessing is not None:
return multiprocessing.current_process().name
class Worker(configurated):
WorkController = WorkController
inherit_confopts = (WorkController, )
loglevel = from_config("log_level")
redirect_stdouts = from_config()
redirect_stdouts_level = from_config()
def __init__(self, hostname=None, discard=False, embed_clockservice=False,
queues=None, include=None, app=None, pidfile=None,
autoscale=None, autoreload=False, **kwargs):
self.app = app = app_or_default(app)
self.hostname = hostname or socket.gethostname()
# this signal can be used to set up configuration for
# workers by name.
signals.celeryd_init.send(sender=self.hostname, instance=self,
conf=self.app.conf)
self.setup_defaults(kwargs, namespace="celeryd")
if not self.concurrency:
self.concurrency = cpu_count()
self.discard = discard
self.embed_clockservice = embed_clockservice
if self.app.IS_WINDOWS and self.embed_clockservice:
self.die("-B option does not work on Windows. "
"Please run celerybeat as a separate service.")
self.use_queues = [] if queues is None else queues
self.queues = None
self.include = [] if include is None else include
self.pidfile = pidfile
self.autoscale = None
self.autoreload = autoreload
if autoscale:
max_c, _, min_c = autoscale.partition(",")
self.autoscale = [int(max_c), min_c and int(min_c) or 0]
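# e.g. autoscale="10,3" yields [10, 3]; a bare "10" yields [10, 0].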
self._isatty = isatty(sys.stdout)
self.colored = app.log.colored(self.logfile)
if isinstance(self.use_queues, basestring):
self.use_queues = self.use_queues.split(",")
if isinstance(self.include, basestring):
self.include = self.include.split(",")
if not isinstance(self.loglevel, int):
try:
self.loglevel = LOG_LEVELS[self.loglevel.upper()]
except KeyError:
self.die("Unknown level %r. Please use one of %s." % (
self.loglevel,
"|".join(l for l in LOG_LEVELS.keys()
if isinstance(l, basestring))))
def run(self):
self.init_loader()
self.init_queues()
self.worker_init()
self.redirect_stdouts_to_logger()
if getattr(os, "getuid", None) and os.getuid() == 0:
warnings.warn(RuntimeWarning(
"Running celeryd with superuser privileges is discouraged!"))
if self.discard:
self.purge_messages()
# Dump configuration to screen so we have some basic information
# for when users send bug reports.
print(str(self.colored.cyan(" \n", self.startup_info())) +
str(self.colored.reset(self.extra_info())))
self.set_process_status("-active-")
try:
self.run_worker()
except IGNORE_ERRORS:
pass
def on_consumer_ready(self, consumer):
signals.worker_ready.send(sender=consumer)
print("celery@%s has started." % self.hostname)
def init_queues(self):
try:
self.app.select_queues(self.use_queues)
except KeyError, exc:
raise ImproperlyConfigured(
UNKNOWN_QUEUE_ERROR % (self.use_queues, exc))
def init_loader(self):
self.loader = self.app.loader
self.settings = self.app.conf
for module in self.include:
self.loader.import_task_module(module)
def redirect_stdouts_to_logger(self):
self.app.log.setup(self.loglevel, self.logfile,
self.redirect_stdouts,
self.redirect_stdouts_level)
def purge_messages(self):
count = self.app.control.discard_all()
print("discard: Erased %d %s from the queue.\n" % (
count, pluralize(count, "message")))
def worker_init(self):
# Run the worker init handler.
# (Usually imports task modules and such.)
self.loader.init_worker()
def tasklist(self, include_builtins=True):
from ..registry import tasks
tasklist = tasks.keys()
if not include_builtins:
tasklist = filter(lambda s: not s.startswith("celery."),
tasklist)
return "\n".join(" . %s" % task for task in sorted(tasklist))
def extra_info(self):
if self.loglevel <= logging.INFO:
include_builtins = self.loglevel <= logging.DEBUG
tasklist = self.tasklist(include_builtins=include_builtins)
return EXTRA_INFO_FMT % {"tasks": tasklist}
return ""
def startup_info(self):
app = self.app
concurrency = self.concurrency
if self.autoscale:
cmax, cmin = self.autoscale
concurrency = "{min=%s, max=%s}" % (cmin, cmax)
return BANNER % {
"hostname": self.hostname,
"version": __version__,
"conninfo": self.app.broker_connection().as_uri(),
"concurrency": concurrency,
"loglevel": LOG_LEVELS[self.loglevel],
"logfile": self.logfile or "[stderr]",
"celerybeat": "ON" if self.embed_clockservice else "OFF",
"events": "ON" if self.send_events else "OFF",
"loader": qualname(self.loader),
"queues": app.amqp.queues.format(indent=18, indent_first=False),
}
def run_worker(self):
if self.pidfile:
pidlock = platforms.create_pidlock(self.pidfile).acquire()
atexit.register(pidlock.release)
worker = self.WorkController(app=self.app,
hostname=self.hostname,
ready_callback=self.on_consumer_ready,
embed_clockservice=self.embed_clockservice,
autoscale=self.autoscale,
autoreload=self.autoreload,
**self.confopts_as_dict())
self.install_platform_tweaks(worker)
signals.worker_init.send(sender=worker)
worker.start()
def install_platform_tweaks(self, worker):
"""Install platform specific tweaks and workarounds."""
if self.app.IS_OSX:
self.osx_proxy_detection_workaround()
# Install signal handler so SIGHUP restarts the worker.
if not self._isatty:
# only install HUP handler if detached from terminal,
# so closing the terminal window doesn't restart celeryd
# into the background.
if self.app.IS_OSX:
# OS X can't exec from a process using threads.
# See http://github.com/celery/celery/issues#issue/152
install_HUP_not_supported_handler(worker)
else:
install_worker_restart_handler(worker)
install_worker_term_handler(worker)
install_worker_term_hard_handler(worker)
install_worker_int_handler(worker)
install_cry_handler(worker.logger)
install_rdb_handler()
def osx_proxy_detection_workaround(self):
"""See http://github.com/celery/celery/issues#issue/161"""
os.environ.setdefault("celery_dummy_proxy", "set_by_celeryd")
def set_process_status(self, info):
info = "%s (%s)" % (info, platforms.strargv(sys.argv))
return platforms.set_mp_process_title("celeryd",
info=info,
hostname=self.hostname)
def die(self, msg, exitcode=1):
sys.stderr.write("Error: %s\n" % (msg, ))
sys.exit(exitcode)
def install_worker_int_handler(worker):
def _stop(signum, frame):
process_name = get_process_name()
if not process_name or process_name == "MainProcess":
print("celeryd: Hitting Ctrl+C again will terminate "
"all running tasks!")
install_worker_int_again_handler(worker)
print("celeryd: Warm shutdown (%s)" % (process_name, ))
worker.stop(in_sighandler=True)
raise SystemExit()
platforms.signals["SIGINT"] = _stop
def install_worker_int_again_handler(worker):
def _stop(signum, frame):
process_name = get_process_name()
if not process_name or process_name == "MainProcess":
print("celeryd: Cold shutdown (%s)" % (process_name, ))
worker.terminate(in_sighandler=True)
raise SystemTerminate()
platforms.signals["SIGINT"] = _stop
def install_worker_term_handler(worker):
def _stop(signum, frame):
process_name = get_process_name()
if not process_name or process_name == "MainProcess":
print("celeryd: Warm shutdown (%s)" % (process_name, ))
worker.stop(in_sighandler=True)
raise SystemExit()
platforms.signals["SIGTERM"] = _stop
def install_worker_term_hard_handler(worker):
def _stop(signum, frame):
process_name = get_process_name()
if not process_name or process_name == "MainProcess":
print("celeryd: Cold shutdown (%s)" % (process_name, ))
worker.terminate(in_sighandler=True)
raise SystemTerminate()
platforms.signals["SIGQUIT"] = _stop
def install_worker_restart_handler(worker):
def restart_worker_sig_handler(signum, frame):
"""Signal handler restarting the current python program."""
print("Restarting celeryd (%s)" % (" ".join(sys.argv), ))
worker.stop(in_sighandler=True)
os.execv(sys.executable, [sys.executable] + sys.argv)
platforms.signals["SIGHUP"] = restart_worker_sig_handler
def install_cry_handler(logger):
# Jython/PyPy does not have sys._current_frames
is_jython = sys.platform.startswith("java")
is_pypy = hasattr(sys, "pypy_version_info")
if not (is_jython or is_pypy):
def cry_handler(signum, frame):
"""Signal handler logging the stacktrace of all active threads."""
logger.error("\n" + cry())
platforms.signals["SIGUSR1"] = cry_handler
def install_rdb_handler(envvar="CELERY_RDBSIG"): # pragma: no cover
def rdb_handler(signum, frame):
"""Signal handler setting a rdb breakpoint at the current frame."""
from ..contrib import rdb
rdb.set_trace(frame)
if os.environ.get(envvar):
platforms.signals["SIGUSR2"] = rdb_handler
def install_HUP_not_supported_handler(worker):
def warn_on_HUP_handler(signum, frame):
worker.logger.error("SIGHUP not supported: "
"Restarting with HUP is unstable on this platform!")
platforms.signals["SIGHUP"] = warn_on_HUP_handler
|
tmxdyf/CouchPotatoServer
|
refs/heads/master
|
libs/apscheduler/triggers/__init__.py
|
134
|
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.simple import SimpleTrigger
|
yeming233/rally
|
refs/heads/master
|
rally/plugins/openstack/context/neutron/__init__.py
|
12133432
| |
browncoat-ninjas/nimoy
|
refs/heads/develop
|
specs/nimoy/assertions/__init__.py
|
12133432
| |
valentin-krasontovitsch/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/netvisor/pn_access_list_ip.py
|
14
|
#!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_access_list_ip
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to add/remove access-list-ip
description:
- This module can be used to add and remove IPs associated with an access list.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use 'present' to add access-list-ip and
'absent' to remove access-list-ip.
required: True
choices: ["present", "absent"]
pn_ip:
description:
- IP associated with the access list.
required: False
default: '::'
type: str
pn_name:
description:
- Access List Name.
required: False
type: str
"""
EXAMPLES = """
- name: access list ip functionality
pn_access_list_ip:
pn_cliswitch: "sw01"
pn_name: "foo"
pn_ip: "172.16.3.1"
state: "present"
- name: access list ip functionality
pn_access_list_ip:
pn_cliswitch: "sw01"
pn_name: "foo"
pn_ip: "172.16.3.1"
state: "absent"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the access-list-ip command.
returned: always
type: list
stderr:
description: set of error responses from the access-list-ip command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
def check_cli(module, cli):
"""
This method checks for idempotency using the access-list-ip-show command.
Returns True if the IP exists, else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_name']
ip = module.params['pn_ip']
cli += ' access-list-ip-show name %s format ip no-show-headers' % name
out = module.run_command(cli.split(), use_unsafe_shell=True)[1]
out = out.split()
return ip in out
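
# Illustration (not part of the module): for pn_name='foo' the check above
# runs roughly
#   <pn_cli prefix> access-list-ip-show name foo format ip no-show-headers
# and scans the whitespace-split output for the requested IP.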
def main():
""" This section is for arguments parsing """
global state_map
state_map = dict(
present='access-list-ip-add',
absent='access-list-ip-remove',
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_ip=dict(required=False, type='str', default='::'),
pn_name=dict(required=False, type='str'),
),
required_if=(
["state", "present", ["pn_name"]],
["state", "absent", ["pn_name", "pn_ip"]],
),
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
ip = module.params['pn_ip']
name = module.params['pn_name']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
IP_EXISTS = check_cli(module, cli)
cli += ' %s name %s ' % (command, name)
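# e.g. state=present, pn_name='foo', pn_ip='172.16.3.1' builds
# "... access-list-ip-add name foo" here, with " ip 172.16.3.1"
# appended below.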
if command == 'access-list-ip-remove':
if IP_EXISTS is False:
module.exit_json(
skipped=True,
msg='access-list with ip %s does not exist' % ip
)
if ip:
cli += ' ip ' + ip
else:
if command == 'access-list-ip-add':
if IP_EXISTS is True:
module.exit_json(
skipped=True,
msg='access list with ip %s already exists' % ip
)
if ip:
cli += ' ip ' + ip
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
|
Entropy512/libsigrokdecode
|
refs/heads/master
|
decoders/parallel/__init__.py
|
5
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2013 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
This protocol decoder can decode synchronous parallel buses with various
numbers of data bits/channels and one (optional) clock line.
If no clock line is supplied, the decoder works slightly differently, in
that it interprets every transition on any of the supplied data channels
as if there had been a clock transition.
The lowest data channels must be used, and they must be consecutive.
For example, for a 4-bit sync parallel bus, channels D0/D1/D2/D3 (and CLK)
should be used. Using combinations like D7/D12/D3/D15 is not supported.
For an 8-bit bus you should use D0-D7, for a 16-bit bus use D0-D15, and so on.
'''
from .pd import Decoder
|
weimingtom/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/distutils/tests/test_msvc9compiler.py
|
47
|
"""Tests for distutils.msvc9compiler."""
import sys
import unittest
import os
from distutils.errors import DistutilsPlatformError
from distutils.tests import support
from test.support import run_unittest
_MANIFEST = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC90.CRT"
version="9.0.21022.8" processorArchitecture="x86"
publicKeyToken="XXXX">
</assemblyIdentity>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC90.MFC"
version="9.0.21022.8" processorArchitecture="x86"
publicKeyToken="XXXX"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>
"""
_CLEANED_MANIFEST = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC90.MFC"
version="9.0.21022.8" processorArchitecture="x86"
publicKeyToken="XXXX"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>"""
if sys.platform=="win32":
from distutils.msvccompiler import get_build_version
if get_build_version()>=8.0:
SKIP_MESSAGE = None
else:
SKIP_MESSAGE = "These tests are only for MSVC8.0 or above"
else:
SKIP_MESSAGE = "These tests are only for win32"
@unittest.skipUnless(SKIP_MESSAGE is None, SKIP_MESSAGE)
class msvc9compilerTestCase(support.TempdirManager,
unittest.TestCase):
def test_no_compiler(self):
# makes sure query_vcvarsall throws
# a DistutilsPlatformError if the compiler
# is not found
from distutils.msvc9compiler import query_vcvarsall
def _find_vcvarsall(version):
return None
from distutils import msvc9compiler
old_find_vcvarsall = msvc9compiler.find_vcvarsall
msvc9compiler.find_vcvarsall = _find_vcvarsall
try:
self.assertRaises(DistutilsPlatformError, query_vcvarsall,
'wont find this version')
finally:
msvc9compiler.find_vcvarsall = old_find_vcvarsall
def test_reg_class(self):
from distutils.msvc9compiler import Reg
self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx')
# Looking for values that should exist in all
# Windows registry versions.
path = r'Control Panel\Desktop'
v = Reg.get_value(path, 'dragfullwindows')
self.assertTrue(v in ('0', '1', '2'))
import winreg
HKCU = winreg.HKEY_CURRENT_USER
keys = Reg.read_keys(HKCU, 'xxxx')
self.assertEqual(keys, None)
keys = Reg.read_keys(HKCU, r'Control Panel')
self.assertTrue('Desktop' in keys)
def test_remove_visual_c_ref(self):
from distutils.msvc9compiler import MSVCCompiler
tempdir = self.mkdtemp()
manifest = os.path.join(tempdir, 'manifest')
f = open(manifest, 'w')
try:
f.write(_MANIFEST)
finally:
f.close()
compiler = MSVCCompiler()
compiler._remove_visual_c_ref(manifest)
# see what we got
f = open(manifest)
try:
# removing trailing spaces
content = '\n'.join([line.rstrip() for line in f.readlines()])
finally:
f.close()
# makes sure the manifest was properly cleaned
self.assertEqual(content, _CLEANED_MANIFEST)
def test_suite():
return unittest.makeSuite(msvc9compilerTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
opennode/nodeconductor-assembly-waldur
|
refs/heads/develop
|
src/waldur_mastermind/marketplace_openstack/management/commands/import_tenant_invoices.py
|
1
|
from django.contrib.contenttypes.models import ContentType
from waldur_core.core.utils import DryRunCommand
from waldur_mastermind.invoices.models import InvoiceItem
from waldur_mastermind.marketplace.models import Resource
from waldur_mastermind.packages.models import OpenStackPackage
class Command(DryRunCommand):
help = """Import OpenStack invoice items from packages application."""
def handle(self, dry_run, *args, **options):
ct = ContentType.objects.get_for_model(OpenStackPackage)
for invoice_item in InvoiceItem.objects.filter(content_type=ct).exclude(
object_id=None
):
package = invoice_item.scope
if not package:
continue
tenant = package.tenant
try:
resource = Resource.objects.get(scope=tenant)
except Resource.DoesNotExist:
self.stdout.write(
    self.style.ERROR(
        'Marketplace resource for tenant with ID %s is not found.'
        % invoice_item.scope.pk
    )
)
else:
if dry_run:
self.stdout.write(
    self.style.SUCCESS(
        'Importing invoice item for package with ID %s.'
        % invoice_item.scope.pk
    )
)
else:
invoice_item.scope = resource
invoice_item.save()
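
# Hedged usage note: as a DryRunCommand subclass this command is typically run
# as "python manage.py import_tenant_invoices --dry-run"; the exact flag name
# comes from waldur_core's DryRunCommand and is assumed here.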
|