| repo_name | ref | path | copies | content |
|---|---|---|---|---|
yufengg/tensorflow
|
refs/heads/master
|
tensorflow/python/lib/io/file_io_test.py
|
31
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Testing File IO operations in file_io.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
class FileIoTest(test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), "base_dir")
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def testFileDoesntExist(self):
file_path = os.path.join(self._base_dir, "temp_file")
self.assertFalse(file_io.file_exists(file_path))
with self.assertRaises(errors.NotFoundError):
_ = file_io.read_file_to_string(file_path)
def testWriteToString(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testAtomicWriteStringToFile(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.atomic_write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testReadBinaryMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
with file_io.FileIO(file_path, mode="rb") as f:
self.assertEqual(b"testing", f.read())
def testWriteBinaryMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, "wb").write("testing")
with file_io.FileIO(file_path, mode="r") as f:
self.assertEqual("testing", f.read())
def testAppend(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("begin\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a1\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a2\n")
with file_io.FileIO(file_path, mode="r") as f:
file_contents = f.read()
self.assertEqual("begin\na1\na2\n", file_contents)
def testMultipleFiles(self):
file_prefix = os.path.join(self._base_dir, "temp_file")
for i in range(5000):
f = file_io.FileIO(file_prefix + str(i), mode="w+")
f.write("testing")
f.flush()
self.assertEqual("testing", f.read())
f.close()
def testMultipleWrites(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("line1\n")
f.write("line2")
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("line1\nline2", file_contents)
def testFileWriteBadMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="r").write("testing")
def testFileReadBadMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="w").read()
def testFileDelete(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_io.delete_file(file_path)
self.assertFalse(file_io.file_exists(file_path))
def testFileDeleteFail(self):
file_path = os.path.join(self._base_dir, "temp_file")
with self.assertRaises(errors.NotFoundError):
file_io.delete_file(file_path)
def testGetMatchingFiles(self):
dir_path = os.path.join(self._base_dir, "temp_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = os.path.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
expected_match = [os.path.join(dir_path, name) for name in files]
self.assertItemsEqual(
file_io.get_matching_files(os.path.join(dir_path, "file*.txt")),
expected_match)
self.assertItemsEqual(file_io.get_matching_files(tuple()), [])
files_subset = [
os.path.join(dir_path, files[0]), os.path.join(dir_path, files[2])
]
self.assertItemsEqual(
file_io.get_matching_files(files_subset), files_subset)
file_io.delete_recursively(dir_path)
self.assertFalse(file_io.file_exists(os.path.join(dir_path, "file3.txt")))
def testCreateRecursiveDir(self):
dir_path = os.path.join(self._base_dir, "temp_dir/temp_dir1/temp_dir2")
file_io.recursive_create_dir(dir_path)
file_io.recursive_create_dir(dir_path) # repeat creation
file_path = os.path.join(dir_path, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
file_io.delete_recursively(os.path.join(self._base_dir, "temp_dir"))
self.assertFalse(file_io.file_exists(file_path))
def testCopy(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.copy(file_path, copy_path)
self.assertTrue(file_io.file_exists(copy_path))
f = file_io.FileIO(file_path, mode="r")
self.assertEqual("testing", f.read())
self.assertEqual(7, f.tell())
def testCopyOverwrite(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
file_io.copy(file_path, copy_path, overwrite=True)
self.assertTrue(file_io.file_exists(copy_path))
self.assertEqual("testing", file_io.FileIO(file_path, mode="r").read())
def testCopyOverwriteFalse(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
with self.assertRaises(errors.AlreadyExistsError):
file_io.copy(file_path, copy_path, overwrite=False)
def testRename(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.rename(file_path, rename_path)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwrite(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
file_io.rename(file_path, rename_path, overwrite=True)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwriteFalse(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
with self.assertRaises(errors.AlreadyExistsError):
file_io.rename(file_path, rename_path, overwrite=False)
self.assertTrue(file_io.file_exists(rename_path))
self.assertTrue(file_io.file_exists(file_path))
def testDeleteRecursivelyFail(self):
fake_dir_path = os.path.join(self._base_dir, "temp_dir")
with self.assertRaises(errors.NotFoundError):
file_io.delete_recursively(fake_dir_path)
def testIsDirectory(self):
dir_path = os.path.join(self._base_dir, "test_dir")
# Failure for a non-existing dir.
self.assertFalse(file_io.is_directory(dir_path))
file_io.create_dir(dir_path)
self.assertTrue(file_io.is_directory(dir_path))
file_path = os.path.join(dir_path, "test_file")
file_io.FileIO(file_path, mode="w").write("test")
# False for a file.
self.assertFalse(file_io.is_directory(file_path))
# Test that the value returned from `stat()` has `is_directory` set.
file_statistics = file_io.stat(dir_path)
self.assertTrue(file_statistics.is_directory)
def testListDirectory(self):
dir_path = os.path.join(self._base_dir, "test_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = os.path.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
subdir_path = os.path.join(dir_path, "sub_dir")
file_io.create_dir(subdir_path)
subdir_file_path = os.path.join(subdir_path, "file4.txt")
file_io.FileIO(subdir_file_path, mode="w").write("testing")
dir_list = file_io.list_directory(dir_path)
self.assertItemsEqual(files + ["sub_dir"], dir_list)
def testListDirectoryFailure(self):
dir_path = os.path.join(self._base_dir, "test_dir")
with self.assertRaises(errors.NotFoundError):
file_io.list_directory(dir_path)
def _setupWalkDirectories(self, dir_path):
# Creating a file structure as follows
# test_dir -> file: file1.txt; dirs: subdir1_1, subdir1_2, subdir1_3
# subdir1_1 -> file: file2.txt
# subdir1_2 -> dir: subdir2
file_io.create_dir(dir_path)
file_io.FileIO(
os.path.join(dir_path, "file1.txt"), mode="w").write("testing")
sub_dirs1 = ["subdir1_1", "subdir1_2", "subdir1_3"]
for name in sub_dirs1:
file_io.create_dir(os.path.join(dir_path, name))
file_io.FileIO(
os.path.join(dir_path, "subdir1_1/file2.txt"),
mode="w").write("testing")
file_io.create_dir(os.path.join(dir_path, "subdir1_2/subdir2"))
def testWalkInOrder(self):
dir_path = os.path.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = True)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=True):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [dir_path] + [
os.path.join(dir_path, item)
for item in
["subdir1_1", "subdir1_2", "subdir1_2/subdir2", "subdir1_3"]
])
self.assertEqual(dir_path, all_dirs[0])
self.assertLess(
all_dirs.index(os.path.join(dir_path, "subdir1_2")),
all_dirs.index(os.path.join(dir_path, "subdir1_2/subdir2")))
self.assertItemsEqual(all_subdirs[1:5], [[], ["subdir2"], [], []])
self.assertItemsEqual(all_subdirs[0],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file1.txt"], ["file2.txt"], [], [], []])
self.assertLess(
all_files.index(["file1.txt"]), all_files.index(["file2.txt"]))
def testWalkPostOrder(self):
dir_path = os.path.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = False)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [
os.path.join(dir_path, item)
for item in
["subdir1_1", "subdir1_2/subdir2", "subdir1_2", "subdir1_3"]
] + [dir_path])
self.assertEqual(dir_path, all_dirs[4])
self.assertLess(
all_dirs.index(os.path.join(dir_path, "subdir1_2/subdir2")),
all_dirs.index(os.path.join(dir_path, "subdir1_2")))
self.assertItemsEqual(all_subdirs[0:4], [[], [], ["subdir2"], []])
self.assertItemsEqual(all_subdirs[4],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file2.txt"], [], [], [], ["file1.txt"]])
self.assertLess(
all_files.index(["file2.txt"]), all_files.index(["file1.txt"]))
def testWalkFailure(self):
dir_path = os.path.join(self._base_dir, "test_dir")
# Try walking a directory that wasn't created.
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [])
self.assertItemsEqual(all_subdirs, [])
self.assertItemsEqual(all_files, [])
def testStat(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_statistics = file_io.stat(file_path)
os_statistics = os.stat(file_path)
self.assertEqual(7, file_statistics.length)
self.assertEqual(
int(os_statistics.st_mtime), int(file_statistics.mtime_nsec / 1e9))
self.assertFalse(file_statistics.is_directory)
def testReadLine(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.readline())
self.assertEqual("testing2\n", f.readline())
self.assertEqual("testing3\n", f.readline())
self.assertEqual("\n", f.readline())
self.assertEqual("testing5", f.readline())
self.assertEqual("", f.readline())
def testRead(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.read(9))
self.assertEqual("testing2\n", f.read(9))
self.assertEqual("t", f.read(1))
self.assertEqual("esting3\n\ntesting5", f.read())
def testTell(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
self.assertEqual(27, f.tell())
self.assertEqual("\n", f.readline())
self.assertEqual(28, f.tell())
self.assertEqual("testing5", f.readline())
self.assertEqual(36, f.tell())
self.assertEqual("", f.readline())
self.assertEqual(36, f.tell())
def testSeek(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(18)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(0)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(-1)
with self.assertRaises(TypeError):
f.seek()
# TODO(jhseu): Delete after position deprecation.
with self.assertRaises(TypeError):
f.seek(offset=0, position=0)
f.seek(position=9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
def testSeekFromWhat(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(9, 1)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9, 0)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(-f.size(), 2)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(0, 3)
def testReadingIterator(self):
file_path = os.path.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
with file_io.FileIO(file_path, mode="r+") as f:
f.write("".join(data))
actual_data = []
for line in f:
actual_data.append(line)
self.assertSequenceEqual(actual_data, data)
def testReadlines(self):
file_path = os.path.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
f = file_io.FileIO(file_path, mode="r+")
f.write("".join(data))
f.flush()
lines = f.readlines()
self.assertSequenceEqual(lines, data)
def testEof(self):
"""Test that reading past EOF does not raise an exception."""
file_path = os.path.join(self._base_dir, "temp_file")
f = file_io.FileIO(file_path, mode="r+")
content = "testing"
f.write(content)
f.flush()
self.assertEqual(content, f.read(len(content) + 1))
if __name__ == "__main__":
test.main()
|
supersven/intellij-community
|
refs/heads/master
|
python/helpers/docutils/parsers/rst/languages/de.py
|
57
|
# $Id: de.py 5174 2007-05-31 00:01:52Z wiemann $
# Authors: Engelbert Gruber <grubert@users.sourceforge.net>;
# Lea Wiemann <LeWiemann@gmail.com>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
German-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
'achtung': 'attention',
'vorsicht': 'caution',
'gefahr': 'danger',
'fehler': 'error',
'hinweis': 'hint',
'wichtig': 'important',
'notiz': 'note',
'tipp': 'tip',
'warnung': 'warning',
'ermahnung': 'admonition',
'kasten': 'sidebar',
'seitenkasten': 'sidebar',
'thema': 'topic',
'zeilen-block': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubrik': 'rubric',
'epigraph': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote', # kasten too ?
'zusammengesetzt': 'compound',
'verbund': 'compound',
u'container (translation required)': 'container',
#'fragen': 'questions',
'tabelle': 'table',
'csv-tabelle': 'csv-table',
'list-table (translation required)': 'list-table',
'meta': 'meta',
#'imagemap': 'imagemap',
'bild': 'image',
'abbildung': 'figure',
u'unver\xe4ndert': 'raw',
u'roh': 'raw',
u'einf\xfcgen': 'include',
'ersetzung': 'replace',
'ersetzen': 'replace',
'ersetze': 'replace',
'unicode': 'unicode',
'datum': 'date',
'klasse': 'class',
'rolle': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'inhalt': 'contents',
'kapitel-nummerierung': 'sectnum',
'abschnitts-nummerierung': 'sectnum',
u'linkziel-fu\xdfnoten': 'target-notes',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#u'fu\xdfnoten': 'footnotes',
#'zitate': 'citations',
}
"""German name to registered (in directives/__init__.py) directive name
mapping."""
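# Illustrative lookup sketch (added comment, not part of docutils itself): the
# reST parser resolves a localized directive name through this mapping, roughly
#     canonical = directives.get(name)   # e.g. 'warnung' -> 'warning'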
roles = {
u'abk\xfcrzung': 'abbreviation',
'akronym': 'acronym',
'index': 'index',
'tiefgestellt': 'subscript',
'hochgestellt': 'superscript',
'titel-referenz': 'title-reference',
'pep-referenz': 'pep-reference',
'rfc-referenz': 'rfc-reference',
'betonung': 'emphasis',
'fett': 'strong',
u'w\xf6rtlich': 'literal',
'benannte-referenz': 'named-reference',
'unbenannte-referenz': 'anonymous-reference',
u'fu\xdfnoten-referenz': 'footnote-reference',
'zitat-referenz': 'citation-reference',
'ersetzungs-referenz': 'substitution-reference',
'ziel': 'target',
'uri-referenz': 'uri-reference',
u'unver\xe4ndert': 'raw',
u'roh': 'raw',}
"""Mapping of German role names to canonical role names for interpreted text.
"""
|
plissonf/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/approximate.py
|
71
|
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
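# Illustrative note (added comment): for a query hash q and prefix length h,
# the left mask keeps only the h most significant bits and the right mask sets
# the remaining low bits to 1, so the half-open interval
#     [searchsorted(tree, q & left_mask),
#      searchsorted(tree, q | right_mask, side='right'))
# covers exactly the sorted hashes that share that h-bit prefix with q.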
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
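# Illustrative note (added comment): with n_components equal to 32, each row of
# `projected` packs into exactly one big-endian uint32 whose bits are the signs
# of the 32 projections, so the returned array has shape (n_samples, 1).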
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value in the range 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# If there are not enough candidates, the remainder is filled
# uniformly from the unselected indices.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of the input data points by taking the
dot product of the input points and the hash functions, then
transforming the projections into a binary array based
on the sign (positive/negative) of each projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components` = hash size and `n_features` = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
|
aperigault/ansible
|
refs/heads/devel
|
lib/ansible/plugins/filter/urlsplit.py
|
146
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils import helpers
def split_url(value, query='', alias='urlsplit'):
results = helpers.object_to_dict(urlsplit(value), exclude=['count', 'index', 'geturl', 'encode'])
# If a query is supplied, make sure it names a valid component, then return it.
# If no query is supplied, return the entire dictionary.
if query:
if query not in results:
raise AnsibleFilterError(alias + ': unknown URL component: %s' % query)
return results[query]
else:
return results
# ---- Ansible filters ----
class FilterModule(object):
''' URI filter '''
def filters(self):
return {
'urlsplit': split_url
}
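# Hedged usage sketch (added comment, hypothetical values):
#   "{{ 'https://user:pw@www.example.com:8080/dir/index.html?q=1#frag' | urlsplit('hostname') }}"
#       -> 'www.example.com'
#   "{{ 'https://www.example.com/index.html' | urlsplit }}"
#       -> a dict with keys such as 'scheme', 'netloc', 'path', 'query', 'fragment'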
|
Hellowlol/plexpy
|
refs/heads/master
|
lib/mutagen/id3/_specs.py
|
22
|
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
import struct
from struct import unpack, pack
from warnings import warn
from .._compat import text_type, chr_, PY3, swap_to_string, string_types
from .._util import total_ordering, decode_terminated, enum
from ._util import ID3JunkFrameError, ID3Warning, BitPaddedInt
class Spec(object):
def __init__(self, name):
self.name = name
def __hash__(self):
raise TypeError("Spec objects are unhashable")
def _validate23(self, frame, value, **kwargs):
"""Return a possibly modified value which, if written,
results in valid id3v2.3 data.
"""
return value
def read(self, frame, value):
raise NotImplementedError
def write(self, frame, value):
raise NotImplementedError
def validate(self, frame, value):
"""Returns the validated data or raises ValueError/TypeError"""
raise NotImplementedError
class ByteSpec(Spec):
def read(self, frame, data):
return bytearray(data)[0], data[1:]
def write(self, frame, value):
return chr_(value)
def validate(self, frame, value):
if value is not None:
chr_(value)
return value
class IntegerSpec(Spec):
def read(self, frame, data):
return int(BitPaddedInt(data, bits=8)), b''
def write(self, frame, value):
return BitPaddedInt.to_str(value, bits=8, width=-1)
def validate(self, frame, value):
return value
class SizedIntegerSpec(Spec):
def __init__(self, name, size):
self.name, self.__sz = name, size
def read(self, frame, data):
return int(BitPaddedInt(data[:self.__sz], bits=8)), data[self.__sz:]
def write(self, frame, value):
return BitPaddedInt.to_str(value, bits=8, width=self.__sz)
def validate(self, frame, value):
return value
@enum
class Encoding(object):
LATIN1 = 0
UTF16 = 1
UTF16BE = 2
UTF8 = 3
class EncodingSpec(ByteSpec):
def read(self, frame, data):
enc, data = super(EncodingSpec, self).read(frame, data)
if enc < 16:
return enc, data
else:
return 0, chr_(enc) + data
def validate(self, frame, value):
if value is None:
return None
if 0 <= value <= 3:
return value
raise ValueError('Invalid Encoding: %r' % value)
def _validate23(self, frame, value, **kwargs):
# only 0, 1 are valid in v2.3, default to utf-16
return min(1, value)
class StringSpec(Spec):
"""A fixed size ASCII only payload."""
def __init__(self, name, length):
super(StringSpec, self).__init__(name)
self.len = length
def read(s, frame, data):
chunk = data[:s.len]
try:
ascii = chunk.decode("ascii")
except UnicodeDecodeError:
raise ID3JunkFrameError("not ascii")
else:
if PY3:
chunk = ascii
return chunk, data[s.len:]
def write(s, frame, value):
if value is None:
return b'\x00' * s.len
else:
if PY3:
value = value.encode("ascii")
return (bytes(value) + b'\x00' * s.len)[:s.len]
def validate(s, frame, value):
if value is None:
return None
if PY3:
if not isinstance(value, str):
raise TypeError("%s has to be str" % s.name)
value.encode("ascii")
else:
if not isinstance(value, bytes):
value = value.encode("ascii")
if len(value) == s.len:
return value
raise ValueError('Invalid StringSpec[%d] data: %r' % (s.len, value))
class BinaryDataSpec(Spec):
def read(self, frame, data):
return data, b''
def write(self, frame, value):
if value is None:
return b""
if isinstance(value, bytes):
return value
value = text_type(value).encode("ascii")
return value
def validate(self, frame, value):
if value is None:
return None
if isinstance(value, bytes):
return value
elif PY3:
raise TypeError("%s has to be bytes" % self.name)
value = text_type(value).encode("ascii")
return value
class EncodedTextSpec(Spec):
# Okay, seriously. This is private and defined explicitly and
# completely by the ID3 specification. You can't just add
# encodings here however you want.
_encodings = (
('latin1', b'\x00'),
('utf16', b'\x00\x00'),
('utf_16_be', b'\x00\x00'),
('utf8', b'\x00')
)
def read(self, frame, data):
enc, term = self._encodings[frame.encoding]
try:
# allow missing termination
return decode_terminated(data, enc, strict=False)
except ValueError:
# utf-16 termination with missing BOM, or single NULL
if not data[:len(term)].strip(b"\x00"):
return u"", data[len(term):]
# utf-16 data with single NULL, see issue 169
try:
return decode_terminated(data + b"\x00", enc)
except ValueError:
raise ID3JunkFrameError
def write(self, frame, value):
enc, term = self._encodings[frame.encoding]
return value.encode(enc) + term
def validate(self, frame, value):
return text_type(value)
class MultiSpec(Spec):
def __init__(self, name, *specs, **kw):
super(MultiSpec, self).__init__(name)
self.specs = specs
self.sep = kw.get('sep')
def read(self, frame, data):
values = []
while data:
record = []
for spec in self.specs:
value, data = spec.read(frame, data)
record.append(value)
if len(self.specs) != 1:
values.append(record)
else:
values.append(record[0])
return values, data
def write(self, frame, value):
data = []
if len(self.specs) == 1:
for v in value:
data.append(self.specs[0].write(frame, v))
else:
for record in value:
for v, s in zip(record, self.specs):
data.append(s.write(frame, v))
return b''.join(data)
def validate(self, frame, value):
if value is None:
return []
if self.sep and isinstance(value, string_types):
value = value.split(self.sep)
if isinstance(value, list):
if len(self.specs) == 1:
return [self.specs[0].validate(frame, v) for v in value]
else:
return [
[s.validate(frame, v) for (v, s) in zip(val, self.specs)]
for val in value]
raise ValueError('Invalid MultiSpec data: %r' % value)
def _validate23(self, frame, value, **kwargs):
if len(self.specs) != 1:
return [[s._validate23(frame, v, **kwargs)
for (v, s) in zip(val, self.specs)]
for val in value]
spec = self.specs[0]
# Merge single text spec multispecs only.
# (TimeStampSpec being the exception, but it's not a valid v2.3 frame)
if not isinstance(spec, EncodedTextSpec) or \
isinstance(spec, TimeStampSpec):
return value
value = [spec._validate23(frame, v, **kwargs) for v in value]
if kwargs.get("sep") is not None:
return [spec.validate(frame, kwargs["sep"].join(value))]
return value
class EncodedNumericTextSpec(EncodedTextSpec):
pass
class EncodedNumericPartTextSpec(EncodedTextSpec):
pass
class Latin1TextSpec(EncodedTextSpec):
def read(self, frame, data):
if b'\x00' in data:
data, ret = data.split(b'\x00', 1)
else:
ret = b''
return data.decode('latin1'), ret
def write(self, data, value):
return value.encode('latin1') + b'\x00'
def validate(self, frame, value):
return text_type(value)
@swap_to_string
@total_ordering
class ID3TimeStamp(object):
"""A time stamp in ID3v2 format.
This is a restricted form of the ISO 8601 standard; time stamps
take the form of:
YYYY-MM-DD HH:MM:SS
Or some partial form (YYYY-MM-DD HH, YYYY, etc.).
The 'text' attribute contains the raw text data of the time stamp.
"""
import re
def __init__(self, text):
if isinstance(text, ID3TimeStamp):
text = text.text
elif not isinstance(text, text_type):
if PY3:
raise TypeError("not a str")
text = text.decode("utf-8")
self.text = text
__formats = ['%04d'] + ['%02d'] * 5
__seps = ['-', '-', ' ', ':', ':', 'x']
def get_text(self):
parts = [self.year, self.month, self.day,
self.hour, self.minute, self.second]
pieces = []
for i, part in enumerate(parts):
if part is None:
break
pieces.append(self.__formats[i] % part + self.__seps[i])
return u''.join(pieces)[:-1]
def set_text(self, text, splitre=re.compile('[-T:/.]|\s+')):
year, month, day, hour, minute, second = \
splitre.split(text + ':::::')[:6]
for a in 'year month day hour minute second'.split():
try:
v = int(locals()[a])
except ValueError:
v = None
setattr(self, a, v)
text = property(get_text, set_text, doc="ID3v2.4 date and time.")
def __str__(self):
return self.text
def __bytes__(self):
return self.text.encode("utf-8")
def __repr__(self):
return repr(self.text)
def __eq__(self, other):
return self.text == other.text
def __lt__(self, other):
return self.text < other.text
__hash__ = object.__hash__
def encode(self, *args):
return self.text.encode(*args)
class TimeStampSpec(EncodedTextSpec):
def read(self, frame, data):
value, data = super(TimeStampSpec, self).read(frame, data)
return self.validate(frame, value), data
def write(self, frame, data):
return super(TimeStampSpec, self).write(frame,
data.text.replace(' ', 'T'))
def validate(self, frame, value):
try:
return ID3TimeStamp(value)
except TypeError:
raise ValueError("Invalid ID3TimeStamp: %r" % value)
class ChannelSpec(ByteSpec):
(OTHER, MASTER, FRONTRIGHT, FRONTLEFT, BACKRIGHT, BACKLEFT, FRONTCENTRE,
BACKCENTRE, SUBWOOFER) = range(9)
class VolumeAdjustmentSpec(Spec):
def read(self, frame, data):
value, = unpack('>h', data[0:2])
return value / 512.0, data[2:]
def write(self, frame, value):
number = int(round(value * 512))
# pack only fails in 2.7, do it manually in 2.6
if not -32768 <= number <= 32767:
raise struct.error
return pack('>h', number)
def validate(self, frame, value):
if value is not None:
try:
self.write(frame, value)
except struct.error:
raise ValueError("out of range")
return value
class VolumePeakSpec(Spec):
def read(self, frame, data):
# http://bugs.xmms.org/attachment.cgi?id=113&action=view
peak = 0
data_array = bytearray(data)
bits = data_array[0]
vol_bytes = min(4, (bits + 7) >> 3)
# not enough frame data
if vol_bytes + 1 > len(data):
raise ID3JunkFrameError
shift = ((8 - (bits & 7)) & 7) + (4 - vol_bytes) * 8
for i in range(1, vol_bytes + 1):
peak *= 256
peak += data_array[i]
peak *= 2 ** shift
return (float(peak) / (2 ** 31 - 1)), data[1 + vol_bytes:]
def write(self, frame, value):
number = int(round(value * 32768))
# pack only fails in 2.7, do it manually in 2.6
if not 0 <= number <= 65535:
raise struct.error
# always write as 16 bits for sanity.
return b"\x10" + pack('>H', number)
def validate(self, frame, value):
if value is not None:
try:
self.write(frame, value)
except struct.error:
raise ValueError("out of range")
return value
class SynchronizedTextSpec(EncodedTextSpec):
def read(self, frame, data):
texts = []
encoding, term = self._encodings[frame.encoding]
while data:
try:
value, data = decode_terminated(data, encoding)
except ValueError:
raise ID3JunkFrameError
if len(data) < 4:
raise ID3JunkFrameError
time, = struct.unpack(">I", data[:4])
texts.append((value, time))
data = data[4:]
return texts, b""
def write(self, frame, value):
data = []
encoding, term = self._encodings[frame.encoding]
for text, time in value:
text = text.encode(encoding) + term
data.append(text + struct.pack(">I", time))
return b"".join(data)
def validate(self, frame, value):
return value
class KeyEventSpec(Spec):
def read(self, frame, data):
events = []
while len(data) >= 5:
events.append(struct.unpack(">bI", data[:5]))
data = data[5:]
return events, data
def write(self, frame, value):
return b"".join(struct.pack(">bI", *event) for event in value)
def validate(self, frame, value):
return value
class VolumeAdjustmentsSpec(Spec):
# Not to be confused with VolumeAdjustmentSpec.
def read(self, frame, data):
adjustments = {}
while len(data) >= 4:
freq, adj = struct.unpack(">Hh", data[:4])
data = data[4:]
freq /= 2.0
adj /= 512.0
adjustments[freq] = adj
adjustments = sorted(adjustments.items())
return adjustments, data
def write(self, frame, value):
value.sort()
return b"".join(struct.pack(">Hh", int(freq * 2), int(adj * 512))
for (freq, adj) in value)
def validate(self, frame, value):
return value
class ASPIIndexSpec(Spec):
def read(self, frame, data):
if frame.b == 16:
format = "H"
size = 2
elif frame.b == 8:
format = "B"
size = 1
else:
warn("invalid bit count in ASPI (%d)" % frame.b, ID3Warning)
return [], data
indexes = data[:frame.N * size]
data = data[frame.N * size:]
return list(struct.unpack(">" + format * frame.N, indexes)), data
def write(self, frame, values):
if frame.b == 16:
format = "H"
elif frame.b == 8:
format = "B"
else:
raise ValueError("frame.b must be 8 or 16")
return struct.pack(">" + format * frame.N, *values)
def validate(self, frame, values):
return values
|
synappio/chapman
|
refs/heads/master
|
chapman/script.py
|
1
|
import os
import time
import base64
import logging.config
from ConfigParser import ConfigParser
from docopt import docopt
from pyramid.paster import bootstrap
from formencode import validators as fev
from formencode import schema as fes
from formencode import variabledecode as fevd
from formencode import foreach as fef
from chapman import worker
CHUNKSIZE = 4096
log = logging.getLogger(__name__)
class Chapman(object):
"""Usage:
chapmand <config> [options]
Options:
-h --help show this help message and exit
-c,--concurrency THREADS number of threads to run [default: 1]
-d,--debug drop into a debugger on task errors?
-n,--name NAME override the name of the worker
"""
settings_schema = fes.Schema(
pre_validators=[fevd.NestedVariables()],
name=fev.String(),
queues=fef.ForEach(if_missing=['chapman']),
path=fev.String(),
sleep_ms=fev.Int())
def __init__(self, name, path, queues, sleep_ms):
self.name = '{}-{}'.format(name, base64.urlsafe_b64encode(os.urandom(6)))
self.path = path
self.queues = queues
self.sleep_ms = sleep_ms
@classmethod
def script(cls):
args = docopt(cls.__doc__)
config = args['<config>']
if '#' in config:
config, section = config.split('#')
else:
section = 'chapman'
_setup_logging(config)
cp = ConfigParser()
cp.read(config)
settings = dict(cp.items(section))
app_section = settings.pop('app')
app_context = bootstrap('{}#{}'.format(config, app_section))
settings = cls.settings_schema.to_python(settings)
if args['--name']:
settings['name'] = args['--name']
self = cls(**settings)
self.run(
app_context['app'],
app_context['registry'],
int(args['--concurrency']),
bool(args['--debug']))
def run(self, app, registry, concurrency, debug):
name = '{}:{}'.format(self.name, os.getpid())
log.info('Starting Chapman')
log.info(' path: %s', self.path)
log.info(' name: %s', name)
log.info(' queues: %s', self.queues)
log.info(' concurrency: %s', concurrency)
log.info(' debug: %s', debug)
log.info(' sleep_ms: %s', self.sleep_ms)
w = worker.Worker(
app=app,
name=name,
qnames=self.queues,
chapman_path=self.path,
registry=registry,
num_threads=concurrency,
sleep=self.sleep_ms / 1000.0,
raise_errors=debug)
w.start()
w.run()
def hq_ping():
args = docopt("""Usage:
chapman-hq-ping <secret> <qname>
Options:
-h --help show this help message and exit
""")
from chapman import hq
class PingListener(hq.Listener):
def __init__(self, qname, secret):
self.q0 = hq.HQueue(qname, secret)
self.q1 = hq.HQueue(qname, secret)
self.msgs = {}
super(PingListener, self).__init__(self.q1, 'listener')
def ping(self):
now = time.time()
result = self.q0.put({}).content
self.msgs[result] = now
def handle(self, id, msg):
now = time.time()
elapsed = now - self.msgs.pop(id, 0)
print 'Latency %s: %dms' % (
id, (elapsed * 1000))
listener = PingListener(args['<qname>'], args['<secret>'])
listener.start()
while True:
listener.ping()
time.sleep(1)
def _setup_logging(config_file):
'''Set up logging like pyramid.paster.setup_logging, but do
NOT disable existing loggers.
'''
if '#' in config_file:
path, _ = config_file.split('#')
else:
path = config_file
full_path = os.path.abspath(path)
here = os.path.dirname(full_path)
return logging.config.fileConfig(
full_path, dict(__file__=full_path, here=here),
disable_existing_loggers=False)
|
phoenix367/AAMToolbox
|
refs/heads/master
|
contribute/gtest/test/gtest_output_test.py
|
1733
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
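# For illustration (the paths and test file name below are hypothetical), the
# substitution maps, e.g.:
#   'foo/gtest_output_test_.cc:123: Failure'  ->  'gtest_output_test_.cc:#: Failure'
#   'foo\gtest_output_test_.cc(123): error'   ->  'gtest_output_test_.cc:#: error'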
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN      \] .*%s(.|\n)*?\[(  FAILED  |       OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
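# For illustration (test names are hypothetical): with pattern 'DeathTest',
# an entire block such as
#   [ RUN      ] FooDeathTest.Bar
#   ...
#   [  FAILED  ] FooDeathTest.Bar (? ms)
# is stripped, and the final re.sub above then removes any remaining single
# lines (e.g. failure-summary entries) that still mention 'DeathTest'.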
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is the command line to run:
the program path followed by any flags, as a list.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
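# For example (hypothetical values), the listing command could be run with one
# extra environment variable like so:
#   out = GetShellCommandOutput(({'GTEST_TOTAL_SHARDS': '1'},
#                                [PROGRAM_PATH, '--gtest_list_tests']))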
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is the command line to run:
the program path followed by any flags, as a list.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
# A mis-configured source control system can cause \r to appear in EOL
# sequences when we read the golden file, irrespective of the operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual)
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
|
custode/reviewboard
|
refs/heads/master
|
contrib/internal/conf/settings_local.py
|
7
|
from __future__ import unicode_literals
import os
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'build-media.db',
}
}
LOCAL_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'reviewboard'))
PRODUCTION = False
DEBUG = False
SECRET_KEY = '1234'
|
csababarta/ntdsxtract
|
refs/heads/master
|
ntds/dsdatabase.py
|
1
|
# This file is part of ntdsxtract.
#
# ntdsxtract is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ntdsxtract is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ntdsxtract. If not, see <http://www.gnu.org/licenses/>.
'''
@author: Csaba Barta
@license: GNU General Public License 2.0 or later
@contact: csaba.barta@gmail.com
'''
import sys
from stat import *
from os import stat
from os import path
import time
import ntds.dsfielddictionary
from ntds.dsencryption import *
from lib.map import *
from lib.sid import *
from lib.guid import *
import pickle
dsMapOffsetByLineId = {} #Map that can be used to find the file offset for a line id
dsMapLineIdByRecordId = {} #Map that can be used to find the line id for a record id
dsMapTypeByRecordId = {} #Map that can be used to find the type id for a record id
dsMapRecordIdByName = {} #Map that can be used to find the record id for an object name
dsMapChildsByRecordId = {} #Map that can be used to find the child objects of a record id
dsMapTypeIdByTypeName = {} #Map that can be used to find the type id for a type name
dsMapRecordIdByTypeId = {} #Map that can be used to find all the records that have a given type id
dsMapRecordIdBySID = {} #Map that can be used to find the record id for a SID
dsMapRecordIdByGUID = {} #Map that can be used to find the record id for a GUID
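# Taken together these maps index the dump: a record id resolves to a line id
# (dsMapLineIdByRecordId), the line id resolves to a file offset
# (dsMapOffsetByLineId), and that offset is used to seek back into the dump
# and re-read the record, as dsBuildTypeMap does below.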
dsSchemaTypeId = -1
dsDatabaseSize = -1
def dsInitDatabase(dsESEFile, workdir):
global dsDatabaseSize
dsDatabaseSize = stat(dsESEFile).st_size
sys.stderr.write("\n[+] Initialising engine...\n")
db = open(dsESEFile, 'rb', 0)
db.seek(0)
line = db.readline()
if line == "":
sys.stderr.write("[!] Warning! Error processing the first line!\n")
sys.exit(1)
else:
dsFieldNameRecord = line.split('\t')
record = line.split('\t')
for cid in range(0, len(record)-1):
#------------------------------------------------------------------------------
# filling indexes for object attributes
#------------------------------------------------------------------------------
if (record[cid] == "DNT_col"):
ntds.dsfielddictionary.dsRecordIdIndex = cid
if (record[cid] == "PDNT_col"):
ntds.dsfielddictionary.dsParentRecordIdIndex = cid
if (record[cid] == "time_col"):
ntds.dsfielddictionary.dsRecordTimeIndex = cid
if (record[cid] == "Ancestors_col"):
ntds.dsfielddictionary.dsAncestorsIndex = cid
if (record[cid] == "ATTb590606"):
ntds.dsfielddictionary.dsObjectTypeIdIndex = cid
if (record[cid] == "ATTm3"):
ntds.dsfielddictionary.dsObjectNameIndex = cid
if (record[cid] == "ATTm589825"):
ntds.dsfielddictionary.dsObjectName2Index = cid
if (record[cid] == "ATTk589826"):
ntds.dsfielddictionary.dsObjectGUIDIndex = cid
if (record[cid] == "ATTl131074"):
ntds.dsfielddictionary.dsWhenCreatedIndex = cid
if (record[cid] == "ATTl131075"):
ntds.dsfielddictionary.dsWhenChangedIndex = cid
if (record[cid] == "ATTq131091"):
ntds.dsfielddictionary.dsUSNCreatedIndex = cid
if (record[cid] == "ATTq131192"):
ntds.dsfielddictionary.dsUSNChangedIndex = cid
if (record[cid] == "OBJ_col"):
ntds.dsfielddictionary.dsObjectColIndex = cid
if (record[cid] == "ATTi131120"):
ntds.dsfielddictionary.dsIsDeletedIndex = cid
#------------------------------------------------------------------------------
# Filling indexes for deleted object attributes
#------------------------------------------------------------------------------
if (record[cid] == "ATTb590605"):
ntds.dsfielddictionary.dsOrigContainerIdIndex = cid
#------------------------------------------------------------------------------
# Filling indexes for account object attributes
#------------------------------------------------------------------------------
if (record[cid] == "ATTr589970"):
ntds.dsfielddictionary.dsSIDIndex = cid
if (record[cid] == "ATTm590045"):
ntds.dsfielddictionary.dsSAMAccountNameIndex = cid
if (record[cid] == "ATTm590480"):
ntds.dsfielddictionary.dsUserPrincipalNameIndex = cid
if (record[cid] == "ATTj590126"):
ntds.dsfielddictionary.dsSAMAccountTypeIndex = cid
if (record[cid] == "ATTj589832"):
ntds.dsfielddictionary.dsUserAccountControlIndex = cid
if (record[cid] == "ATTq589876"):
ntds.dsfielddictionary.dsLastLogonIndex = cid
if (record[cid] == "ATTq591520"):
ntds.dsfielddictionary.dsLastLogonTimeStampIndex = cid
if (record[cid] == "ATTq589983"):
ntds.dsfielddictionary.dsAccountExpiresIndex = cid
if (record[cid] == "ATTq589920"):
ntds.dsfielddictionary.dsPasswordLastSetIndex = cid
if (record[cid] == "ATTq589873"):
ntds.dsfielddictionary.dsBadPwdTimeIndex = cid
if (record[cid] == "ATTj589993"):
ntds.dsfielddictionary.dsLogonCountIndex = cid
if (record[cid] == "ATTj589836"):
ntds.dsfielddictionary.dsBadPwdCountIndex = cid
if (record[cid] == "ATTj589922"):
ntds.dsfielddictionary.dsPrimaryGroupIdIndex = cid
if (record[cid] == "ATTk589914"):
ntds.dsfielddictionary.dsNTHashIndex = cid
if (record[cid] == "ATTk589879"):
ntds.dsfielddictionary.dsLMHashIndex = cid
if (record[cid] == "ATTk589918"):
ntds.dsfielddictionary.dsNTHashHistoryIndex = cid
if (record[cid] == "ATTk589984"):
ntds.dsfielddictionary.dsLMHashHistoryIndex = cid
if (record[cid] == "ATTk591734"):
ntds.dsfielddictionary.dsUnixPasswordIndex = cid
if (record[cid] == "ATTk36"):
ntds.dsfielddictionary.dsADUserObjectsIndex = cid
if (record[cid] == "ATTk589949"):
ntds.dsfielddictionary.dsSupplementalCredentialsIndex = cid
#------------------------------------------------------------------------------
# Filling indexes for computer objects attributes
#------------------------------------------------------------------------------
if (record[cid] == "ATTj589993"):
ntds.dsfielddictionary.dsLogonCountIndex = cid
if (record[cid] == "ATTm590443"):
ntds.dsfielddictionary.dsDNSHostNameIndex = cid
if (record[cid] == "ATTm590187"):
ntds.dsfielddictionary.dsOSNameIndex = cid
if (record[cid] == "ATTm590188"):
ntds.dsfielddictionary.dsOSVersionIndex = cid
#------------------------------------------------------------------------------
# Filling indexes for bitlocker objects
#------------------------------------------------------------------------------
if (record[cid] == "ATTm591788"):
ntds.dsfielddictionary.dsRecoveryPasswordIndex = cid
if (record[cid] == "ATTk591823"):
ntds.dsfielddictionary.dsFVEKeyPackageIndex = cid
if (record[cid] == "ATTk591822"):
ntds.dsfielddictionary.dsVolumeGUIDIndex = cid
if (record[cid] == "ATTk591789"):
ntds.dsfielddictionary.dsRecoveryGUIDIndex = cid
#------------------------------------------------------------------------------
# Filling indexes for dial-in permission attributes
#------------------------------------------------------------------------------
if (record[cid] == "ATTi590943"):
ntds.dsfielddictionary.dsDialInAccessPermission = cid
#===============================================================================
# Filling indexes for AD encryption
#===============================================================================
if (record[cid] == "ATTk590689"):
ntds.dsfielddictionary.dsPEKIndex = cid
db.seek(0)
dsCheckMaps(db, workdir)
return db
def dsCheckMaps(dsDatabase, workdir):
try:
global dsMapOffsetByLineId
global dsMapLineIdByRecordId
global dsMapRecordIdByName
global dsMapTypeByRecordId
global dsMapChildsByRecordId
global dsMapTypeIdByTypeName
global dsMapRecordIdByTypeId
global dsMapRecordIdBySID
global dsMapRecordIdByGUID
sys.stderr.write("[+] Loading saved map files (Stage 1)...\n")
dsLoadMap(path.join(workdir, "offlid.map"), dsMapOffsetByLineId)
dsLoadMap(path.join(workdir, "lidrid.map"), dsMapLineIdByRecordId)
dsLoadMap(path.join(workdir, "ridname.map"), dsMapRecordIdByName)
dsLoadMap(path.join(workdir, "typerid.map"), dsMapTypeByRecordId)
dsLoadMap(path.join(workdir, "childsrid.map"), dsMapChildsByRecordId)
dsLoadMap(path.join(workdir, "typeidname.map"), dsMapTypeIdByTypeName)
dsLoadMap(path.join(workdir, "ridsid.map"), dsMapRecordIdBySID)
dsLoadMap(path.join(workdir, "ridguid.map"), dsMapRecordIdByGUID)
dsLoadMap(path.join(workdir, "ridtype.map"), dsMapRecordIdByTypeId)
pek = open(path.join(workdir, "pek.map"), "rb")
ntds.dsfielddictionary.dsEncryptedPEK = pek.read()
pek.close()
except Exception as e:
sys.stderr.write("[!] Warning: Opening saved maps failed: " + str(e) + "\n")
sys.stderr.write("[+] Rebuilding maps...\n")
dsBuildMaps(dsDatabase, workdir)
pass
def dsBuildMaps(dsDatabase, workdir):
global dsMapOffsetByLineId
global dsMapLineIdByRecordId
global dsMapRecordIdByName
global dsMapTypeByRecordId
global dsMapChildsByRecordId
global dsMapRecordIdBySID
global dsMapRecordIdByGUID
global dsMapRecordIdByTypeId
global dsSchemaTypeId
lineid = 0
while True:
sys.stderr.write("\r[+] Scanning database - %d%% -> %d records processed" % (
dsDatabase.tell()*100/dsDatabaseSize,
lineid+1
))
sys.stderr.flush()
try:
dsMapOffsetByLineId[lineid] = dsDatabase.tell()
except:
sys.stderr.write("\n[!] Warning! Error at dsMapOffsetByLineId!\n")
pass
line = dsDatabase.readline()
if line == "":
break
record = line.split('\t')
if lineid != 0:
#===================================================================
# This record will always be the record representing the domain
# object
# This should be the only record containing the PEK
#===================================================================
if record[ntds.dsfielddictionary.dsPEKIndex] != "":
if ntds.dsfielddictionary.dsEncryptedPEK != "":
sys.stderr.write("\n[!] Warning! Multiple records with PEK entry!\n")
ntds.dsfielddictionary.dsEncryptedPEK = record[ntds.dsfielddictionary.dsPEKIndex]
try:
dsMapLineIdByRecordId[int(record[ntds.dsfielddictionary.dsRecordIdIndex])] = lineid
except:
sys.stderr.write("\n[!] Warning! Error at dsMapLineIdByRecordId!\n")
pass
try:
tmp = dsMapRecordIdByName[record[ntds.dsfielddictionary.dsObjectName2Index]]
# Also save the Schema type id for future use
if record[ntds.dsfielddictionary.dsObjectName2Index] == "Schema":
if dsSchemaTypeId == -1 and record[ntds.dsfielddictionary.dsObjectTypeIdIndex] != "":
dsSchemaTypeId = int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])
else:
sys.stderr.write("\n[!] Warning! There is more than one Schema object! The DB is inconsistent!\n")
except:
dsMapRecordIdByName[record[ntds.dsfielddictionary.dsObjectName2Index]] = int(record[ntds.dsfielddictionary.dsRecordIdIndex])
if record[ntds.dsfielddictionary.dsObjectName2Index] == "Schema":
if dsSchemaTypeId == -1 and record[ntds.dsfielddictionary.dsObjectTypeIdIndex] != "":
dsSchemaTypeId = int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])
else:
sys.stderr.write("\n[!] Warning! There is more than one Schema object! The DB is inconsistent!\n")
pass
try:
dsMapTypeByRecordId[int(record[ntds.dsfielddictionary.dsRecordIdIndex])] = record[ntds.dsfielddictionary.dsObjectTypeIdIndex]
except:
sys.stderr.write("\n[!] Warning! Error at dsMapTypeByRecordId!\n")
pass
try:
tmp = dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsRecordIdIndex])]
except KeyError:
dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsRecordIdIndex])] = []
pass
try:
dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsParentRecordIdIndex])].append(int(record[ntds.dsfielddictionary.dsRecordIdIndex]))
except KeyError:
dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsParentRecordIdIndex])] = []
dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsParentRecordIdIndex])].append(int(record[ntds.dsfielddictionary.dsRecordIdIndex]))
try:
dsMapRecordIdBySID[str(SID(record[ntds.dsfielddictionary.dsSIDIndex]))]
except KeyError:
dsMapRecordIdBySID[str(SID(record[ntds.dsfielddictionary.dsSIDIndex]))] = int(record[ntds.dsfielddictionary.dsRecordIdIndex])
try:
dsMapRecordIdByGUID[str(GUID(record[ntds.dsfielddictionary.dsObjectGUIDIndex]))]
except KeyError:
dsMapRecordIdByGUID[str(GUID(record[ntds.dsfielddictionary.dsObjectGUIDIndex]))] = int(record[ntds.dsfielddictionary.dsRecordIdIndex])
try:
if record[ntds.dsfielddictionary.dsObjectTypeIdIndex] != "":
dsMapRecordIdByTypeId[int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])].append(int(record[ntds.dsfielddictionary.dsRecordIdIndex]))
except KeyError:
dsMapRecordIdByTypeId[int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])] = []
dsMapRecordIdByTypeId[int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])].append(int(record[ntds.dsfielddictionary.dsRecordIdIndex]))
lineid += 1
sys.stderr.write("\n")
offlid = open(path.join(workdir, "offlid.map"), "wb")
pickle.dump(dsMapOffsetByLineId, offlid)
offlid.close()
lidrid = open(path.join(workdir, "lidrid.map"), "wb")
pickle.dump(dsMapLineIdByRecordId, lidrid)
lidrid.close()
ridname = open(path.join(workdir, "ridname.map"), "wb")
pickle.dump(dsMapRecordIdByName, ridname)
ridname.close()
typerid = open(path.join(workdir, "typerid.map"), "wb")
pickle.dump(dsMapTypeByRecordId, typerid)
typerid.close()
childsrid = open(path.join(workdir, "childsrid.map"), "wb")
pickle.dump(dsMapChildsByRecordId, childsrid)
childsrid.close()
pek = open(path.join(workdir, "pek.map"), "wb")
pek.write(ntds.dsfielddictionary.dsEncryptedPEK)
pek.close()
ridsid = open(path.join(workdir, "ridsid.map"), "wb")
pickle.dump(dsMapRecordIdBySID, ridsid)
ridsid.close()
ridguid = open(path.join(workdir, "ridguid.map"), "wb")
pickle.dump(dsMapRecordIdByGUID, ridguid)
ridguid.close()
ridtype = open(path.join(workdir, "ridtype.map"), "wb")
pickle.dump(dsMapRecordIdByTypeId, ridtype)
ridtype.close()
dsBuildTypeMap(dsDatabase, workdir)
def dsBuildTypeMap(dsDatabase, workdir):
global dsMapTypeIdByTypeName
global dsMapLineIdByRecordId
global dsMapChildsByRecordId
global dsSchemaTypeId
schemarecid = -1
sys.stderr.write("[+] Sanity checks...\n")
if dsSchemaTypeId == -1:
sys.stderr.write("[!] Error! The Schema object's type id cannot be found! The DB is inconsistent!\n")
sys.exit(1)
elif len(dsMapRecordIdByTypeId[dsSchemaTypeId]) > 1:
sys.stderr.write("[!] Warning! There are more than 1 schema objects! The DB is inconsistent!\n")
sys.stderr.write(" Schema record ids: " + str(dsMapRecordIdByTypeId[dsSchemaTypeId]) + "\n")
sys.stderr.write(" Please select the schema id you would like to use!\n")
tmp = raw_input()
while True:
try:
if int(tmp) in dsMapRecordIdByTypeId[dsSchemaTypeId]:
schemarecid = int(tmp)
break
else:
sys.stderr.write(" Please enter a number that is in the list of ids!\n")
tmp = raw_input()
except:
sys.stderr.write(" Please enter a number!\n")
tmp = raw_input()
elif len(dsMapRecordIdByTypeId[dsSchemaTypeId]) == 0:
sys.stderr.write("[!] Warning! There is no schema object! The DB is inconsistent!\n")
else:
schemarecid = dsMapRecordIdByTypeId[dsSchemaTypeId][0]
sys.stderr.write(" Schema record id: %d\n" % schemarecid)
sys.stderr.write(" Schema type id: %d\n" % int(dsMapTypeByRecordId[schemarecid]))
sys.stderr.flush()
schemachilds = dsMapChildsByRecordId[schemarecid]
i = 0
l = len(schemachilds)
for child in schemachilds:
sys.stderr.write("\r[+] Extracting schema information - %d%% -> %d records processed" % (
i*100/l,
i+1
))
sys.stderr.flush()
lineid = int(dsMapLineIdByRecordId[int(child)])
offset = int(dsMapOffsetByLineId[int(lineid)])
dsDatabase.seek(offset)
record = ""
line = ""
line = dsDatabase.readline()
if line != "":
record = line.split('\t')
name = record[ntds.dsfielddictionary.dsObjectName2Index]
dsMapTypeIdByTypeName[name] = child
i += 1
typeidname = open(path.join(workdir, "typeidname.map"), "wb")
pickle.dump(dsMapTypeIdByTypeName, typeidname)
typeidname.close()
sys.stderr.write("\r[+] Extracting schema information - %d%% -> %d records processed" % (
100,
i
))
sys.stderr.write("\n")
sys.stderr.flush()
def dsInitEncryption(syshive_fname):
bootkey = get_syskey(syshive_fname)
enc_pek = unhexlify(ntds.dsfielddictionary.dsEncryptedPEK[16:])
ntds.dsfielddictionary.dsPEK = dsDecryptPEK(bootkey, enc_pek)
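# A minimal usage sketch (the file names below are hypothetical, not part of
# this module):
#   db = dsInitDatabase("datatable.csv", "/tmp/ntds-work")  # builds or loads the maps
#   dsInitEncryption("SYSTEM")                              # derives the PEK from the bootkey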
|
gritlogic/incubator-airflow
|
refs/heads/master
|
airflow/example_dags/example_latest_only.py
|
10
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example of the LatestOnlyOperator
"""
import datetime as dt
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.latest_only_operator import LatestOnlyOperator
from airflow.utils.trigger_rule import TriggerRule
dag = DAG(
dag_id='latest_only',
schedule_interval=dt.timedelta(hours=4),
start_date=dt.datetime(2016, 9, 20),
)
latest_only = LatestOnlyOperator(task_id='latest_only', dag=dag)
task1 = DummyOperator(task_id='task1', dag=dag)
task1.set_upstream(latest_only)
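# Note: with this wiring task1 runs only for the most recent scheduled
# interval; LatestOnlyOperator skips its downstream tasks on backfill runs.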
|
hazelcast/hazelcast-python-client
|
refs/heads/master
|
tests/hzrc/RemoteController.py
|
1
|
# fmt: off
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style,utf8strings
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def ping(self):
pass
def clean(self):
pass
def exit(self):
pass
def createCluster(self, hzVersion, xmlconfig):
"""
Parameters:
- hzVersion
- xmlconfig
"""
pass
def createClusterKeepClusterName(self, hzVersion, xmlconfig):
"""
Parameters:
- hzVersion
- xmlconfig
"""
pass
def startMember(self, clusterId):
"""
Parameters:
- clusterId
"""
pass
def shutdownMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
pass
def terminateMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
pass
def suspendMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
pass
def resumeMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
pass
def shutdownCluster(self, clusterId):
"""
Parameters:
- clusterId
"""
pass
def terminateCluster(self, clusterId):
"""
Parameters:
- clusterId
"""
pass
def splitMemberFromCluster(self, memberId):
"""
Parameters:
- memberId
"""
pass
def mergeMemberToCluster(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
pass
def executeOnController(self, clusterId, script, lang):
"""
Parameters:
- clusterId
- script
- lang
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def ping(self):
self.send_ping()
return self.recv_ping()
def send_ping(self):
self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)
args = ping_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_ping(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "ping failed: unknown result")
def clean(self):
self.send_clean()
return self.recv_clean()
def send_clean(self):
self._oprot.writeMessageBegin('clean', TMessageType.CALL, self._seqid)
args = clean_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_clean(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = clean_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "clean failed: unknown result")
def exit(self):
self.send_exit()
return self.recv_exit()
def send_exit(self):
self._oprot.writeMessageBegin('exit', TMessageType.CALL, self._seqid)
args = exit_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_exit(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = exit_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "exit failed: unknown result")
def createCluster(self, hzVersion, xmlconfig):
"""
Parameters:
- hzVersion
- xmlconfig
"""
self.send_createCluster(hzVersion, xmlconfig)
return self.recv_createCluster()
def send_createCluster(self, hzVersion, xmlconfig):
self._oprot.writeMessageBegin('createCluster', TMessageType.CALL, self._seqid)
args = createCluster_args()
args.hzVersion = hzVersion
args.xmlconfig = xmlconfig
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_createCluster(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = createCluster_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.serverException is not None:
raise result.serverException
raise TApplicationException(TApplicationException.MISSING_RESULT, "createCluster failed: unknown result")
def createClusterKeepClusterName(self, hzVersion, xmlconfig):
"""
Parameters:
- hzVersion
- xmlconfig
"""
self.send_createClusterKeepClusterName(hzVersion, xmlconfig)
return self.recv_createClusterKeepClusterName()
def send_createClusterKeepClusterName(self, hzVersion, xmlconfig):
self._oprot.writeMessageBegin('createClusterKeepClusterName', TMessageType.CALL, self._seqid)
args = createClusterKeepClusterName_args()
args.hzVersion = hzVersion
args.xmlconfig = xmlconfig
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_createClusterKeepClusterName(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = createClusterKeepClusterName_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.serverException is not None:
raise result.serverException
raise TApplicationException(TApplicationException.MISSING_RESULT, "createClusterKeepClusterName failed: unknown result")
def startMember(self, clusterId):
"""
Parameters:
- clusterId
"""
self.send_startMember(clusterId)
return self.recv_startMember()
def send_startMember(self, clusterId):
self._oprot.writeMessageBegin('startMember', TMessageType.CALL, self._seqid)
args = startMember_args()
args.clusterId = clusterId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_startMember(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = startMember_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.serverException is not None:
raise result.serverException
raise TApplicationException(TApplicationException.MISSING_RESULT, "startMember failed: unknown result")
def shutdownMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_shutdownMember(clusterId, memberId)
return self.recv_shutdownMember()
def send_shutdownMember(self, clusterId, memberId):
self._oprot.writeMessageBegin('shutdownMember', TMessageType.CALL, self._seqid)
args = shutdownMember_args()
args.clusterId = clusterId
args.memberId = memberId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_shutdownMember(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = shutdownMember_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "shutdownMember failed: unknown result")
def terminateMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_terminateMember(clusterId, memberId)
return self.recv_terminateMember()
def send_terminateMember(self, clusterId, memberId):
self._oprot.writeMessageBegin('terminateMember', TMessageType.CALL, self._seqid)
args = terminateMember_args()
args.clusterId = clusterId
args.memberId = memberId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_terminateMember(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = terminateMember_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "terminateMember failed: unknown result")
def suspendMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_suspendMember(clusterId, memberId)
return self.recv_suspendMember()
def send_suspendMember(self, clusterId, memberId):
self._oprot.writeMessageBegin('suspendMember', TMessageType.CALL, self._seqid)
args = suspendMember_args()
args.clusterId = clusterId
args.memberId = memberId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_suspendMember(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = suspendMember_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "suspendMember failed: unknown result")
def resumeMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_resumeMember(clusterId, memberId)
return self.recv_resumeMember()
def send_resumeMember(self, clusterId, memberId):
self._oprot.writeMessageBegin('resumeMember', TMessageType.CALL, self._seqid)
args = resumeMember_args()
args.clusterId = clusterId
args.memberId = memberId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_resumeMember(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = resumeMember_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "resumeMember failed: unknown result")
def shutdownCluster(self, clusterId):
"""
Parameters:
- clusterId
"""
self.send_shutdownCluster(clusterId)
return self.recv_shutdownCluster()
def send_shutdownCluster(self, clusterId):
self._oprot.writeMessageBegin('shutdownCluster', TMessageType.CALL, self._seqid)
args = shutdownCluster_args()
args.clusterId = clusterId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_shutdownCluster(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = shutdownCluster_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "shutdownCluster failed: unknown result")
def terminateCluster(self, clusterId):
"""
Parameters:
- clusterId
"""
self.send_terminateCluster(clusterId)
return self.recv_terminateCluster()
def send_terminateCluster(self, clusterId):
self._oprot.writeMessageBegin('terminateCluster', TMessageType.CALL, self._seqid)
args = terminateCluster_args()
args.clusterId = clusterId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_terminateCluster(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = terminateCluster_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "terminateCluster failed: unknown result")
def splitMemberFromCluster(self, memberId):
"""
Parameters:
- memberId
"""
self.send_splitMemberFromCluster(memberId)
return self.recv_splitMemberFromCluster()
def send_splitMemberFromCluster(self, memberId):
self._oprot.writeMessageBegin('splitMemberFromCluster', TMessageType.CALL, self._seqid)
args = splitMemberFromCluster_args()
args.memberId = memberId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_splitMemberFromCluster(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = splitMemberFromCluster_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "splitMemberFromCluster failed: unknown result")
def mergeMemberToCluster(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_mergeMemberToCluster(clusterId, memberId)
return self.recv_mergeMemberToCluster()
def send_mergeMemberToCluster(self, clusterId, memberId):
self._oprot.writeMessageBegin('mergeMemberToCluster', TMessageType.CALL, self._seqid)
args = mergeMemberToCluster_args()
args.clusterId = clusterId
args.memberId = memberId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mergeMemberToCluster(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = mergeMemberToCluster_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "mergeMemberToCluster failed: unknown result")
def executeOnController(self, clusterId, script, lang):
"""
Parameters:
- clusterId
- script
- lang
"""
self.send_executeOnController(clusterId, script, lang)
return self.recv_executeOnController()
def send_executeOnController(self, clusterId, script, lang):
self._oprot.writeMessageBegin('executeOnController', TMessageType.CALL, self._seqid)
args = executeOnController_args()
args.clusterId = clusterId
args.script = script
args.lang = lang
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_executeOnController(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = executeOnController_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "executeOnController failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["ping"] = Processor.process_ping
self._processMap["clean"] = Processor.process_clean
self._processMap["exit"] = Processor.process_exit
self._processMap["createCluster"] = Processor.process_createCluster
self._processMap["createClusterKeepClusterName"] = Processor.process_createClusterKeepClusterName
self._processMap["startMember"] = Processor.process_startMember
self._processMap["shutdownMember"] = Processor.process_shutdownMember
self._processMap["terminateMember"] = Processor.process_terminateMember
self._processMap["suspendMember"] = Processor.process_suspendMember
self._processMap["resumeMember"] = Processor.process_resumeMember
self._processMap["shutdownCluster"] = Processor.process_shutdownCluster
self._processMap["terminateCluster"] = Processor.process_terminateCluster
self._processMap["splitMemberFromCluster"] = Processor.process_splitMemberFromCluster
self._processMap["mergeMemberToCluster"] = Processor.process_mergeMemberToCluster
self._processMap["executeOnController"] = Processor.process_executeOnController
self._on_message_begin = None
def on_message_begin(self, func):
self._on_message_begin = func
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if self._on_message_begin:
self._on_message_begin(name, type, seqid)
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_ping(self, seqid, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
try:
result.success = self._handler.ping()
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("ping", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_clean(self, seqid, iprot, oprot):
args = clean_args()
args.read(iprot)
iprot.readMessageEnd()
result = clean_result()
try:
result.success = self._handler.clean()
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("clean", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_exit(self, seqid, iprot, oprot):
args = exit_args()
args.read(iprot)
iprot.readMessageEnd()
result = exit_result()
try:
result.success = self._handler.exit()
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("exit", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_createCluster(self, seqid, iprot, oprot):
args = createCluster_args()
args.read(iprot)
iprot.readMessageEnd()
result = createCluster_result()
try:
result.success = self._handler.createCluster(args.hzVersion, args.xmlconfig)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except ServerException as serverException:
msg_type = TMessageType.REPLY
result.serverException = serverException
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("createCluster", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_createClusterKeepClusterName(self, seqid, iprot, oprot):
args = createClusterKeepClusterName_args()
args.read(iprot)
iprot.readMessageEnd()
result = createClusterKeepClusterName_result()
try:
result.success = self._handler.createClusterKeepClusterName(args.hzVersion, args.xmlconfig)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except ServerException as serverException:
msg_type = TMessageType.REPLY
result.serverException = serverException
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("createClusterKeepClusterName", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_startMember(self, seqid, iprot, oprot):
args = startMember_args()
args.read(iprot)
iprot.readMessageEnd()
result = startMember_result()
try:
result.success = self._handler.startMember(args.clusterId)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except ServerException as serverException:
msg_type = TMessageType.REPLY
result.serverException = serverException
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("startMember", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_shutdownMember(self, seqid, iprot, oprot):
args = shutdownMember_args()
args.read(iprot)
iprot.readMessageEnd()
result = shutdownMember_result()
try:
result.success = self._handler.shutdownMember(args.clusterId, args.memberId)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("shutdownMember", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_terminateMember(self, seqid, iprot, oprot):
args = terminateMember_args()
args.read(iprot)
iprot.readMessageEnd()
result = terminateMember_result()
try:
result.success = self._handler.terminateMember(args.clusterId, args.memberId)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("terminateMember", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_suspendMember(self, seqid, iprot, oprot):
args = suspendMember_args()
args.read(iprot)
iprot.readMessageEnd()
result = suspendMember_result()
try:
result.success = self._handler.suspendMember(args.clusterId, args.memberId)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("suspendMember", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_resumeMember(self, seqid, iprot, oprot):
args = resumeMember_args()
args.read(iprot)
iprot.readMessageEnd()
result = resumeMember_result()
try:
result.success = self._handler.resumeMember(args.clusterId, args.memberId)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("resumeMember", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_shutdownCluster(self, seqid, iprot, oprot):
args = shutdownCluster_args()
args.read(iprot)
iprot.readMessageEnd()
result = shutdownCluster_result()
try:
result.success = self._handler.shutdownCluster(args.clusterId)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("shutdownCluster", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_terminateCluster(self, seqid, iprot, oprot):
args = terminateCluster_args()
args.read(iprot)
iprot.readMessageEnd()
result = terminateCluster_result()
try:
result.success = self._handler.terminateCluster(args.clusterId)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("terminateCluster", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_splitMemberFromCluster(self, seqid, iprot, oprot):
args = splitMemberFromCluster_args()
args.read(iprot)
iprot.readMessageEnd()
result = splitMemberFromCluster_result()
try:
result.success = self._handler.splitMemberFromCluster(args.memberId)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("splitMemberFromCluster", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mergeMemberToCluster(self, seqid, iprot, oprot):
args = mergeMemberToCluster_args()
args.read(iprot)
iprot.readMessageEnd()
result = mergeMemberToCluster_result()
try:
result.success = self._handler.mergeMemberToCluster(args.clusterId, args.memberId)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("mergeMemberToCluster", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_executeOnController(self, seqid, iprot, oprot):
args = executeOnController_args()
args.read(iprot)
iprot.readMessageEnd()
result = executeOnController_result()
try:
result.success = self._handler.executeOnController(args.clusterId, args.script, args.lang)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("executeOnController", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class ping_args(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(ping_args)
ping_args.thrift_spec = (
)
class ping_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ping_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(ping_result)
ping_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
class clean_args(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('clean_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(clean_args)
clean_args.thrift_spec = (
)
class clean_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('clean_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(clean_result)
clean_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
class exit_args(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('exit_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(exit_args)
exit_args.thrift_spec = (
)
class exit_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('exit_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(exit_result)
exit_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
class createCluster_args(object):
"""
Attributes:
- hzVersion
- xmlconfig
"""
def __init__(self, hzVersion=None, xmlconfig=None,):
self.hzVersion = hzVersion
self.xmlconfig = xmlconfig
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.hzVersion = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.xmlconfig = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('createCluster_args')
if self.hzVersion is not None:
oprot.writeFieldBegin('hzVersion', TType.STRING, 1)
oprot.writeString(self.hzVersion.encode('utf-8') if sys.version_info[0] == 2 else self.hzVersion)
oprot.writeFieldEnd()
if self.xmlconfig is not None:
oprot.writeFieldBegin('xmlconfig', TType.STRING, 2)
oprot.writeString(self.xmlconfig.encode('utf-8') if sys.version_info[0] == 2 else self.xmlconfig)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(createCluster_args)
createCluster_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'hzVersion', 'UTF8', None, ), # 1
(2, TType.STRING, 'xmlconfig', 'UTF8', None, ), # 2
)
class createCluster_result(object):
"""
Attributes:
- success
- serverException
"""
def __init__(self, success=None, serverException=None,):
self.success = success
self.serverException = serverException
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = Cluster()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.serverException = ServerException()
self.serverException.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('createCluster_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.serverException is not None:
oprot.writeFieldBegin('serverException', TType.STRUCT, 1)
self.serverException.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(createCluster_result)
createCluster_result.thrift_spec = (
(0, TType.STRUCT, 'success', [Cluster, None], None, ), # 0
(1, TType.STRUCT, 'serverException', [ServerException, None], None, ), # 1
)
class createClusterKeepClusterName_args(object):
"""
Attributes:
- hzVersion
- xmlconfig
"""
def __init__(self, hzVersion=None, xmlconfig=None,):
self.hzVersion = hzVersion
self.xmlconfig = xmlconfig
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.hzVersion = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.xmlconfig = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('createClusterKeepClusterName_args')
if self.hzVersion is not None:
oprot.writeFieldBegin('hzVersion', TType.STRING, 1)
oprot.writeString(self.hzVersion.encode('utf-8') if sys.version_info[0] == 2 else self.hzVersion)
oprot.writeFieldEnd()
if self.xmlconfig is not None:
oprot.writeFieldBegin('xmlconfig', TType.STRING, 2)
oprot.writeString(self.xmlconfig.encode('utf-8') if sys.version_info[0] == 2 else self.xmlconfig)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(createClusterKeepClusterName_args)
createClusterKeepClusterName_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'hzVersion', 'UTF8', None, ), # 1
(2, TType.STRING, 'xmlconfig', 'UTF8', None, ), # 2
)
class createClusterKeepClusterName_result(object):
"""
Attributes:
- success
- serverException
"""
def __init__(self, success=None, serverException=None,):
self.success = success
self.serverException = serverException
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = Cluster()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.serverException = ServerException()
self.serverException.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('createClusterKeepClusterName_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.serverException is not None:
oprot.writeFieldBegin('serverException', TType.STRUCT, 1)
self.serverException.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(createClusterKeepClusterName_result)
createClusterKeepClusterName_result.thrift_spec = (
(0, TType.STRUCT, 'success', [Cluster, None], None, ), # 0
(1, TType.STRUCT, 'serverException', [ServerException, None], None, ), # 1
)
class startMember_args(object):
"""
Attributes:
- clusterId
"""
def __init__(self, clusterId=None,):
self.clusterId = clusterId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.clusterId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('startMember_args')
if self.clusterId is not None:
oprot.writeFieldBegin('clusterId', TType.STRING, 1)
oprot.writeString(self.clusterId.encode('utf-8') if sys.version_info[0] == 2 else self.clusterId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(startMember_args)
startMember_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'clusterId', 'UTF8', None, ), # 1
)
class startMember_result(object):
"""
Attributes:
- success
- serverException
"""
def __init__(self, success=None, serverException=None,):
self.success = success
self.serverException = serverException
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = Member()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.serverException = ServerException()
self.serverException.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('startMember_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.serverException is not None:
oprot.writeFieldBegin('serverException', TType.STRUCT, 1)
self.serverException.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(startMember_result)
startMember_result.thrift_spec = (
(0, TType.STRUCT, 'success', [Member, None], None, ), # 0
(1, TType.STRUCT, 'serverException', [ServerException, None], None, ), # 1
)
class shutdownMember_args(object):
"""
Attributes:
- clusterId
- memberId
"""
def __init__(self, clusterId=None, memberId=None,):
self.clusterId = clusterId
self.memberId = memberId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.clusterId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.memberId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('shutdownMember_args')
if self.clusterId is not None:
oprot.writeFieldBegin('clusterId', TType.STRING, 1)
oprot.writeString(self.clusterId.encode('utf-8') if sys.version_info[0] == 2 else self.clusterId)
oprot.writeFieldEnd()
if self.memberId is not None:
oprot.writeFieldBegin('memberId', TType.STRING, 2)
oprot.writeString(self.memberId.encode('utf-8') if sys.version_info[0] == 2 else self.memberId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(shutdownMember_args)
shutdownMember_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'clusterId', 'UTF8', None, ), # 1
(2, TType.STRING, 'memberId', 'UTF8', None, ), # 2
)
class shutdownMember_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('shutdownMember_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(shutdownMember_result)
shutdownMember_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
class terminateMember_args(object):
"""
Attributes:
- clusterId
- memberId
"""
def __init__(self, clusterId=None, memberId=None,):
self.clusterId = clusterId
self.memberId = memberId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.clusterId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.memberId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('terminateMember_args')
if self.clusterId is not None:
oprot.writeFieldBegin('clusterId', TType.STRING, 1)
oprot.writeString(self.clusterId.encode('utf-8') if sys.version_info[0] == 2 else self.clusterId)
oprot.writeFieldEnd()
if self.memberId is not None:
oprot.writeFieldBegin('memberId', TType.STRING, 2)
oprot.writeString(self.memberId.encode('utf-8') if sys.version_info[0] == 2 else self.memberId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(terminateMember_args)
terminateMember_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'clusterId', 'UTF8', None, ), # 1
(2, TType.STRING, 'memberId', 'UTF8', None, ), # 2
)
class terminateMember_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('terminateMember_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(terminateMember_result)
terminateMember_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
class suspendMember_args(object):
"""
Attributes:
- clusterId
- memberId
"""
def __init__(self, clusterId=None, memberId=None,):
self.clusterId = clusterId
self.memberId = memberId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.clusterId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.memberId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('suspendMember_args')
if self.clusterId is not None:
oprot.writeFieldBegin('clusterId', TType.STRING, 1)
oprot.writeString(self.clusterId.encode('utf-8') if sys.version_info[0] == 2 else self.clusterId)
oprot.writeFieldEnd()
if self.memberId is not None:
oprot.writeFieldBegin('memberId', TType.STRING, 2)
oprot.writeString(self.memberId.encode('utf-8') if sys.version_info[0] == 2 else self.memberId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(suspendMember_args)
suspendMember_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'clusterId', 'UTF8', None, ), # 1
(2, TType.STRING, 'memberId', 'UTF8', None, ), # 2
)
class suspendMember_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('suspendMember_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(suspendMember_result)
suspendMember_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
class resumeMember_args(object):
"""
Attributes:
- clusterId
- memberId
"""
def __init__(self, clusterId=None, memberId=None,):
self.clusterId = clusterId
self.memberId = memberId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.clusterId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.memberId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('resumeMember_args')
if self.clusterId is not None:
oprot.writeFieldBegin('clusterId', TType.STRING, 1)
oprot.writeString(self.clusterId.encode('utf-8') if sys.version_info[0] == 2 else self.clusterId)
oprot.writeFieldEnd()
if self.memberId is not None:
oprot.writeFieldBegin('memberId', TType.STRING, 2)
oprot.writeString(self.memberId.encode('utf-8') if sys.version_info[0] == 2 else self.memberId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(resumeMember_args)
resumeMember_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'clusterId', 'UTF8', None, ), # 1
(2, TType.STRING, 'memberId', 'UTF8', None, ), # 2
)
class resumeMember_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('resumeMember_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(resumeMember_result)
resumeMember_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
class shutdownCluster_args(object):
"""
Attributes:
- clusterId
"""
def __init__(self, clusterId=None,):
self.clusterId = clusterId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.clusterId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('shutdownCluster_args')
if self.clusterId is not None:
oprot.writeFieldBegin('clusterId', TType.STRING, 1)
oprot.writeString(self.clusterId.encode('utf-8') if sys.version_info[0] == 2 else self.clusterId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(shutdownCluster_args)
shutdownCluster_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'clusterId', 'UTF8', None, ), # 1
)
class shutdownCluster_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('shutdownCluster_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(shutdownCluster_result)
shutdownCluster_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
class terminateCluster_args(object):
"""
Attributes:
- clusterId
"""
def __init__(self, clusterId=None,):
self.clusterId = clusterId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.clusterId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('terminateCluster_args')
if self.clusterId is not None:
oprot.writeFieldBegin('clusterId', TType.STRING, 1)
oprot.writeString(self.clusterId.encode('utf-8') if sys.version_info[0] == 2 else self.clusterId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(terminateCluster_args)
terminateCluster_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'clusterId', 'UTF8', None, ), # 1
)
class terminateCluster_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('terminateCluster_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(terminateCluster_result)
terminateCluster_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
class splitMemberFromCluster_args(object):
"""
Attributes:
- memberId
"""
def __init__(self, memberId=None,):
self.memberId = memberId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.memberId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('splitMemberFromCluster_args')
if self.memberId is not None:
oprot.writeFieldBegin('memberId', TType.STRING, 1)
oprot.writeString(self.memberId.encode('utf-8') if sys.version_info[0] == 2 else self.memberId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(splitMemberFromCluster_args)
splitMemberFromCluster_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'memberId', 'UTF8', None, ), # 1
)
class splitMemberFromCluster_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = Cluster()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('splitMemberFromCluster_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(splitMemberFromCluster_result)
splitMemberFromCluster_result.thrift_spec = (
(0, TType.STRUCT, 'success', [Cluster, None], None, ), # 0
)
class mergeMemberToCluster_args(object):
"""
Attributes:
- clusterId
- memberId
"""
def __init__(self, clusterId=None, memberId=None,):
self.clusterId = clusterId
self.memberId = memberId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.clusterId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.memberId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('mergeMemberToCluster_args')
if self.clusterId is not None:
oprot.writeFieldBegin('clusterId', TType.STRING, 1)
oprot.writeString(self.clusterId.encode('utf-8') if sys.version_info[0] == 2 else self.clusterId)
oprot.writeFieldEnd()
if self.memberId is not None:
oprot.writeFieldBegin('memberId', TType.STRING, 2)
oprot.writeString(self.memberId.encode('utf-8') if sys.version_info[0] == 2 else self.memberId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(mergeMemberToCluster_args)
mergeMemberToCluster_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'clusterId', 'UTF8', None, ), # 1
(2, TType.STRING, 'memberId', 'UTF8', None, ), # 2
)
class mergeMemberToCluster_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = Cluster()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('mergeMemberToCluster_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(mergeMemberToCluster_result)
mergeMemberToCluster_result.thrift_spec = (
(0, TType.STRUCT, 'success', [Cluster, None], None, ), # 0
)
class executeOnController_args(object):
"""
Attributes:
- clusterId
- script
- lang
"""
def __init__(self, clusterId=None, script=None, lang=None,):
self.clusterId = clusterId
self.script = script
self.lang = lang
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.clusterId = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.script = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.lang = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('executeOnController_args')
if self.clusterId is not None:
oprot.writeFieldBegin('clusterId', TType.STRING, 1)
oprot.writeString(self.clusterId.encode('utf-8') if sys.version_info[0] == 2 else self.clusterId)
oprot.writeFieldEnd()
if self.script is not None:
oprot.writeFieldBegin('script', TType.STRING, 2)
oprot.writeString(self.script.encode('utf-8') if sys.version_info[0] == 2 else self.script)
oprot.writeFieldEnd()
if self.lang is not None:
oprot.writeFieldBegin('lang', TType.I32, 3)
oprot.writeI32(self.lang)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(executeOnController_args)
executeOnController_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'clusterId', 'UTF8', None, ), # 1
(2, TType.STRING, 'script', 'UTF8', None, ), # 2
(3, TType.I32, 'lang', None, None, ), # 3
)
class executeOnController_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = Response()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('executeOnController_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(executeOnController_result)
executeOnController_result.thrift_spec = (
(0, TType.STRUCT, 'success', [Response, None], None, ), # 0
)
fix_spec(all_structs)
del all_structs
# fmt: on
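# --- Hedged usage sketch (an addition, not part of the generated module) ---
# A typical way to call this service is through the generated `Client` class
# defined earlier in this module, over a buffered binary Thrift transport.
# The host, port and the calls shown here are illustrative assumptions.
if __name__ == "__main__":
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol

    socket = TSocket.TSocket("localhost", 9701)  # host/port are assumptions
    transport = TTransport.TBufferedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)  # `Client` is generated earlier in this module
    transport.open()
    try:
        print(client.ping())
    finally:
        transport.close()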
|
aidanlister/django
|
refs/heads/master
|
tests/model_inheritance/same_model_name/__init__.py
|
12133432
| |
jathak/ok-client
|
refs/heads/master
|
tests/sources/ok_test/__init__.py
|
12133432
| |
drfrink/adminLTE_Django
|
refs/heads/master
|
webappdemo/models.py
|
4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
|
osnoob/flkweb
|
refs/heads/master
|
apps/config.py
|
1
|
#!/usr/bin/env python
# -*-coding:utf-8-*-
__author__ = "Allen Woo"
class Role():
'''
User role weights, expressed as binary bit flags.
'''
PERMISSIONS = [
("USER",0b00000001), #普通用户
("ADMIN",0b01000000), #管理员
("ROOT",0b10000000) #超级管理员
]
USER = 0b00000001
ADMIN = 0b01000000
ROOT = 0b10000000
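# Hedged usage sketch (an addition, not part of the original module): with
# bit-flag weights like these, a permission check is normally a bitwise AND
# against the required mask.
def has_permission(weight, required):
    # True when every bit of `required` is present in `weight`.
    return (weight & required) == required

# e.g. has_permission(Role.ROOT, Role.ROOT) is True,
#      has_permission(Role.USER, Role.ADMIN) is False.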
|
bobcyw/django
|
refs/heads/master
|
tests/admin_scripts/custom_templates/project_template/project_name/settings.py
|
738
|
# Django settings for {{ project_name }} test project.
|
mozilla/mozillians
|
refs/heads/master
|
mozillians/funfacts/migrations/0001_initial.py
|
9
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mozillians.funfacts.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='FunFact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=255)),
('published', models.BooleanField(default=False, choices=[(True, b'Published'), (False, b'Unpublished')])),
('public_text', models.TextField()),
('number', models.TextField(max_length=1000, validators=[mozillians.funfacts.models._validate_query])),
('divisor', models.TextField(blank=True, max_length=1000, null=True, validators=[mozillians.funfacts.models._validate_query])),
],
options={
'ordering': ['created'],
},
bases=(models.Model,),
),
]
|
broferek/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/check_point/checkpoint_run_script.py
|
18
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: checkpoint_run_script
short_description: Run scripts on Check Point devices over Web Services API
description:
- Run scripts on Check Point devices.
All operations are performed over Web Services API.
version_added: "2.8"
author: "Ansible by Red Hat (@rcarrillocruz)"
options:
script_name:
description:
- Name of the script.
type: str
required: True
script:
description:
- Script body contents.
type: str
required: True
targets:
description:
- Targets the script should be run against. Can reference either name or UID.
type: list
required: True
"""
EXAMPLES = """
- name: Run script
checkpoint_run_script:
script_name: "List root"
script: ls -l /
targets:
- mycheckpointgw
"""
RETURN = """
checkpoint_run_script:
description: The checkpoint run script output.
returned: always.
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.checkpoint.checkpoint import publish, install_policy
import json
def run_script(module, connection):
script_name = module.params['script_name']
script = module.params['script']
targets = module.params['targets']
payload = {'script-name': script_name,
'script': script,
'targets': targets}
code, response = connection.send_request('/web_api/run-script', payload)
return code, response
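# Illustrative request body (values taken from the EXAMPLES section above)
# as it would be posted to the /web_api/run-script endpoint:
# {
#     "script-name": "List root",
#     "script": "ls -l /",
#     "targets": ["mycheckpointgw"]
# }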
def main():
argument_spec = dict(
script_name=dict(type='str', required=True),
script=dict(type='str', required=True),
targets=dict(type='list', required=True)
)
module = AnsibleModule(argument_spec=argument_spec)
connection = Connection(module._socket_path)
code, response = run_script(module, connection)
result = {'changed': True}
if code == 200:
result['checkpoint_run_script'] = response
else:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
terbolous/SickRage
|
refs/heads/master
|
lib/stevedore/driver.py
|
21
|
from .exception import NoMatches, MultipleMatches
from .named import NamedExtensionManager
class DriverManager(NamedExtensionManager):
"""Load a single plugin with a given name from the namespace.
:param namespace: The namespace for the entry points.
:type namespace: str
:param name: The name of the driver to load.
:type name: str
:param invoke_on_load: Boolean controlling whether to invoke the
object returned by the entry point after the driver is loaded.
:type invoke_on_load: bool
:param invoke_args: Positional arguments to pass when invoking
the object returned by the entry point. Only used if invoke_on_load
is True.
:type invoke_args: tuple
:param invoke_kwds: Named arguments to pass when invoking
the object returned by the entry point. Only used if invoke_on_load
is True.
:type invoke_kwds: dict
:param on_load_failure_callback: Callback function that will be called when
an entrypoint cannot be loaded. The arguments that will be provided
when this is called (when an entrypoint fails to load) are
(manager, entrypoint, exception)
:type on_load_failure_callback: function
:param verify_requirements: Use setuptools to enforce the
dependencies of the plugin(s) being loaded. Defaults to False.
:type verify_requirements: bool
"""
def __init__(self, namespace, name,
invoke_on_load=False, invoke_args=(), invoke_kwds={},
on_load_failure_callback=None,
verify_requirements=False):
on_load_failure_callback = on_load_failure_callback \
or self._default_on_load_failure
super(DriverManager, self).__init__(
namespace=namespace,
names=[name],
invoke_on_load=invoke_on_load,
invoke_args=invoke_args,
invoke_kwds=invoke_kwds,
on_load_failure_callback=on_load_failure_callback,
verify_requirements=verify_requirements,
)
@staticmethod
def _default_on_load_failure(drivermanager, ep, err):
raise
@classmethod
def make_test_instance(cls, extension, namespace='TESTING',
propagate_map_exceptions=False,
on_load_failure_callback=None,
verify_requirements=False):
"""Construct a test DriverManager
Test instances are passed a list of extensions to work from rather
than loading them from entry points.
:param extension: Pre-configured Extension instance
:type extension: :class:`~stevedore.extension.Extension`
:param namespace: The namespace for the manager; used only for
identification since the extensions are passed in.
:type namespace: str
:param propagate_map_exceptions: Boolean controlling whether exceptions
are propagated up through the map call or whether they are logged
and then ignored
:type propagate_map_exceptions: bool
:param on_load_failure_callback: Callback function that will
be called when an entrypoint cannot be loaded. The
arguments that will be provided when this is called (when
an entrypoint fails to load) are (manager, entrypoint,
exception)
:type on_load_failure_callback: function
:param verify_requirements: Use setuptools to enforce the
dependencies of the plugin(s) being loaded. Defaults to False.
:type verify_requirements: bool
:return: The manager instance, initialized for testing
"""
o = super(DriverManager, cls).make_test_instance(
[extension], namespace=namespace,
propagate_map_exceptions=propagate_map_exceptions,
on_load_failure_callback=on_load_failure_callback,
verify_requirements=verify_requirements)
return o
def _init_plugins(self, extensions):
super(DriverManager, self)._init_plugins(extensions)
if not self.extensions:
name = self._names[0]
raise NoMatches('No %r driver found, looking for %r' %
(self.namespace, name))
if len(self.extensions) > 1:
discovered_drivers = ','.join(e.entry_point_target
for e in self.extensions)
raise MultipleMatches('Multiple %r drivers found: %s' %
(self.namespace, discovered_drivers))
def __call__(self, func, *args, **kwds):
"""Invokes func() for the single loaded extension.
The signature for func() should be::
def func(ext, *args, **kwds):
pass
The first argument to func(), 'ext', is the
:class:`~stevedore.extension.Extension` instance.
Exceptions raised from within func() are logged and ignored.
:param func: Callable to invoke for each extension.
:param args: Variable arguments to pass to func()
:param kwds: Keyword arguments to pass to func()
:returns: List of values returned from func()
"""
results = self.map(func, *args, **kwds)
if results:
return results[0]
@property
def driver(self):
"""Returns the driver being used by this manager.
"""
ext = self.extensions[0]
return ext.obj if ext.obj else ext.plugin
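# Illustrative usage sketch (not part of this module); the namespace and
# plugin name below are hypothetical placeholders, not names this package
# defines:
#
#   mgr = DriverManager(namespace='example.namespace',
#                       name='example_driver',
#                       invoke_on_load=True)
#   result = mgr(lambda ext: ext.name)  # func(ext) receives the Extension
#   driver = mgr.driver                 # the loaded plugin object (or class)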
|
HyperloopTeam/FullOpenMDAO
|
refs/heads/master
|
lib/python2.7/site-packages/openmdao.main-0.13.0-py2.7.egg/openmdao/main/test/test_replace3.py
|
1
|
import unittest
from openmdao.main.api import Component, Assembly, Driver, set_as_top
from openmdao.main.datatypes.api import Float
class BaseComp(Component):
# inputs: None
# outputs: myout
myout = Float(3.14, iotype='out')
def execute(self):
self.myout *= 0.5
class ExtendedComp(BaseComp):
# inputs: myin
# outputs: myout, myout2
myout2 = Float(iotype='out')
myin = Float(iotype='in')
def execute(self):
self.myout = self.myin / 2.0
self.myout2 = self.myin * 2.0
class TestComp(ExtendedComp):
def execute(self):
self.myout = self.myin + 10.0
self.myout2 = self.myin - 10.0
class BaseAsym(Assembly):
# inputs: None
# outputs: myout
def configure(self):
self.add('bc', BaseComp())
self.driver.workflow.add(['bc'])
self.create_passthrough('bc.myout')
class ExtendedAsym(BaseAsym):
# inputs: myin
# outputs: myout, myout2
def configure(self):
super(ExtendedAsym, self).configure()
self.replace('bc', ExtendedComp())
self.create_passthrough('bc.myin')
self.create_passthrough('bc.myout2')
class TestAsym(ExtendedAsym):
# inputs: myin
# outputs: myout, myout2
def configure(self):
super(TestAsym, self).configure()
self.replace('bc', TestComp())
class LargeAsym(Assembly):
# inputs: myin
# outputs: myout, myout2
def configure(self):
self.add('ea', ExtendedAsym())
self.driver.workflow.add(['ea'])
self.create_passthrough('ea.myin')
self.create_passthrough('ea.myout')
self.create_passthrough('ea.myout2')
class TestLargeAsym(LargeAsym):
def __init__(self):
super(TestLargeAsym, self).__init__()
def configure(self):
super(TestLargeAsym, self).configure()
self.replace('ea', TestAsym())
class Replace3TestCase(unittest.TestCase):
def test_TestAsym(self):
mytest = set_as_top(TestAsym())
mytest.myin = 2
self.assertEqual(mytest.myout, 3.14)
self.assertEqual(mytest.myout2, 0)
mytest.run()
self.assertEqual(mytest.myin, 2)
self.assertEqual(mytest.myout, 12)
self.assertEqual(mytest.myout2, -8)
def test_LargeAsym(self):
largetest = set_as_top(TestLargeAsym())
largetest.myin = 2
largetest.run()
self.assertEqual(largetest.myin, 2)
self.assertEqual(largetest.ea.myin, 2)
self.assertEqual(largetest.ea.myout, 12)
self.assertEqual(largetest.ea.myout2, -8)
self.assertEqual(largetest.myout, 12)
self.assertEqual(largetest.myout2,-8)
if __name__=="__main__":
unittest.main()
|
sankhesh/VTK
|
refs/heads/master
|
ThirdParty/Twisted/twisted/python/dist.py
|
23
|
# -*- test-case-name: twisted.python.test.test_dist -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Distutils convenience functionality.
Don't use this outside of Twisted.
Maintainer: Christopher Armstrong
"""
from distutils.command import build_scripts, install_data, build_ext
from distutils.errors import CompileError
from distutils import core
from distutils.core import Extension
import fnmatch
import os
import platform
import sys
from twisted import copyright
from twisted.python.compat import execfile
STATIC_PACKAGE_METADATA = dict(
name="Twisted",
version=copyright.version,
description="An asynchronous networking framework written in Python",
author="Twisted Matrix Laboratories",
author_email="twisted-python@twistedmatrix.com",
maintainer="Glyph Lefkowitz",
maintainer_email="glyph@twistedmatrix.com",
url="http://twistedmatrix.com/",
license="MIT",
long_description="""\
An extensible framework for Python programming, with special focus
on event-based network programming and multiprotocol integration.
""",
classifiers=[
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
],
)
twisted_subprojects = ["conch", "lore", "mail", "names",
"news", "pair", "runner", "web",
"words"]
class ConditionalExtension(Extension):
"""
An extension module that will only be compiled if certain conditions are
met.
@param condition: A callable of one argument which returns True or False to
indicate whether the extension should be built. The argument is an
instance of L{build_ext_twisted}, which has useful methods for checking
things about the platform.
"""
def __init__(self, *args, **kwargs):
self.condition = kwargs.pop("condition", lambda builder: True)
Extension.__init__(self, *args, **kwargs)
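# Illustrative sketch (not part of this file): a ConditionalExtension whose
# condition consults the builder, e.g. compiling only when a header can be
# found. The module and source file names are hypothetical placeholders.
#
#   ConditionalExtension(
#       "twisted.example._native", ["twisted/example/_native.c"],
#       condition=lambda builder: builder._check_header("sys/epoll.h"))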
def setup(**kw):
"""
An alternative to distutils' setup() which is specially designed
for Twisted subprojects.
Pass twisted_subproject=projname if you want package and data
files to automatically be found for you.
@param conditionalExtensions: Extensions to optionally build.
@type conditionalExtensions: C{list} of L{ConditionalExtension}
"""
return core.setup(**get_setup_args(**kw))
def get_setup_args(**kw):
if 'twisted_subproject' in kw:
if 'twisted' not in os.listdir('.'):
raise RuntimeError("Sorry, you need to run setup.py from the "
"toplevel source directory.")
projname = kw['twisted_subproject']
projdir = os.path.join('twisted', projname)
kw['packages'] = getPackages(projdir, parent='twisted')
kw['version'] = getVersion(projname)
plugin = "twisted/plugins/twisted_" + projname + ".py"
if os.path.exists(plugin):
kw.setdefault('py_modules', []).append(
plugin.replace("/", ".")[:-3])
kw['data_files'] = getDataFiles(projdir, parent='twisted')
del kw['twisted_subproject']
else:
if 'plugins' in kw:
py_modules = []
for plg in kw['plugins']:
py_modules.append("twisted.plugins." + plg)
kw.setdefault('py_modules', []).extend(py_modules)
del kw['plugins']
if 'cmdclass' not in kw:
kw['cmdclass'] = {
'install_data': install_data_twisted,
'build_scripts': build_scripts_twisted}
if "conditionalExtensions" in kw:
extensions = kw["conditionalExtensions"]
del kw["conditionalExtensions"]
if 'ext_modules' not in kw:
# This is a workaround for distutils behavior; ext_modules isn't
# actually used by our custom builder. distutils deep-down checks
# to see if there are any ext_modules defined before invoking
# the build_ext command. We need to trigger build_ext regardless
# because it is the thing that does the conditional checks to see
# if it should build any extensions. The reason we have to delay
# the conditional checks until then is that the compiler objects
# are not yet set up when this code is executed.
kw["ext_modules"] = extensions
class my_build_ext(build_ext_twisted):
conditionalExtensions = extensions
kw.setdefault('cmdclass', {})['build_ext'] = my_build_ext
return kw
def getVersion(proj, base="twisted"):
"""
Extract the version number for a given project.
@param proj: the name of the project. Examples are "core",
"conch", "words", "mail".
@rtype: str
@returns: The version number of the project, as a string like
"2.0.0".
"""
if proj == 'core':
vfile = os.path.join(base, '_version.py')
else:
vfile = os.path.join(base, proj, '_version.py')
ns = {'__name__': 'Nothing to see here'}
execfile(vfile, ns)
return ns['version'].base()
# Names that are excluded from globbing results:
EXCLUDE_NAMES = ["{arch}", "CVS", ".cvsignore", "_darcs",
"RCS", "SCCS", ".svn"]
EXCLUDE_PATTERNS = ["*.py[cdo]", "*.s[ol]", ".#*", "*~", "*.py"]
def _filterNames(names):
"""
Given a list of file names, return those names that should be copied.
"""
names = [n for n in names
if n not in EXCLUDE_NAMES]
# This is needed when building a distro from a working
# copy (likely a checkout) rather than a pristine export:
for pattern in EXCLUDE_PATTERNS:
names = [n for n in names
if (not fnmatch.fnmatch(n, pattern))
and (not n.endswith('.py'))]
return names
def relativeTo(base, relativee):
"""
Gets 'relativee' relative to 'basepath'.
i.e.,
>>> relativeTo('/home/', '/home/radix/')
'radix'
>>> relativeTo('.', '/home/radix/Projects/Twisted') # curdir is /home/radix
'Projects/Twisted'
The 'relativee' must be a child of 'basepath'.
"""
basepath = os.path.abspath(base)
relativee = os.path.abspath(relativee)
if relativee.startswith(basepath):
relative = relativee[len(basepath):]
if relative.startswith(os.sep):
relative = relative[1:]
return os.path.join(base, relative)
raise ValueError("%s is not a subpath of %s" % (relativee, basepath))
def getDataFiles(dname, ignore=None, parent=None):
"""
Get all the data files that should be included in this distutils Project.
'dname' should be the path to the package that you're distributing.
'ignore' is a list of sub-packages to ignore. This facilitates
disparate package hierarchies. That's a fancy way of saying that
the 'twisted' package doesn't want to include the 'twisted.conch'
package, so it will pass ['conch'] as the value.
'parent' is necessary if you're distributing a subpackage like
twisted.conch. 'dname' should point to 'twisted/conch' and 'parent'
should point to 'twisted'. This ensures that your data_files are
generated correctly, only using relative paths for the first element
of the tuple ('twisted/conch/*').
The default 'parent' is the current working directory.
"""
parent = parent or "."
ignore = ignore or []
result = []
for directory, subdirectories, filenames in os.walk(dname):
resultfiles = []
for exname in EXCLUDE_NAMES:
if exname in subdirectories:
subdirectories.remove(exname)
for ig in ignore:
if ig in subdirectories:
subdirectories.remove(ig)
for filename in _filterNames(filenames):
resultfiles.append(filename)
if resultfiles:
result.append((relativeTo(parent, directory),
[relativeTo(parent,
os.path.join(directory, filename))
for filename in resultfiles]))
return result
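# Illustrative example (hypothetical layout): if twisted/conch contained a
# single data file twisted/conch/topfiles/README, then
# getDataFiles('twisted/conch', parent='twisted') would return
# [('twisted/conch/topfiles', ['twisted/conch/topfiles/README'])]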
def getExtensions():
"""
Get all extensions from core and all subprojects.
"""
extensions = []
if not sys.platform.startswith('java'):
for dir in os.listdir("twisted") + [""]:
topfiles = os.path.join("twisted", dir, "topfiles")
if os.path.isdir(topfiles):
ns = {}
setup_py = os.path.join(topfiles, "setup.py")
execfile(setup_py, ns, ns)
if "extensions" in ns:
extensions.extend(ns["extensions"])
return extensions
def getPackages(dname, pkgname=None, results=None, ignore=None, parent=None):
"""
Get all packages which are under dname. This is necessary for
Python 2.2's distutils. Pretty similar arguments to getDataFiles,
including 'parent'.
"""
parent = parent or ""
prefix = []
if parent:
prefix = [parent]
bname = os.path.basename(dname)
ignore = ignore or []
if bname in ignore:
return []
if results is None:
results = []
if pkgname is None:
pkgname = []
subfiles = os.listdir(dname)
abssubfiles = [os.path.join(dname, x) for x in subfiles]
if '__init__.py' in subfiles:
results.append(prefix + pkgname + [bname])
for subdir in filter(os.path.isdir, abssubfiles):
getPackages(subdir, pkgname=pkgname + [bname],
results=results, ignore=ignore,
parent=parent)
res = ['.'.join(result) for result in results]
return res
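# Illustrative example: for a layout twisted/conch/{__init__.py, ssh/__init__.py},
# getPackages('twisted/conch', parent='twisted') would return
# ['twisted.conch', 'twisted.conch.ssh']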
def getAllScripts():
# "" is included because core scripts are directly in bin/
projects = [''] + [x for x in os.listdir('bin')
if os.path.isdir(os.path.join("bin", x))
and x in twisted_subprojects]
scripts = []
for i in projects:
scripts.extend(getScripts(i))
return scripts
def getScripts(projname, basedir=''):
"""
    Returns a list of scripts for a Twisted subproject; this works in
    either an SVN checkout or a project-specific tarball.
"""
scriptdir = os.path.join(basedir, 'bin', projname)
if not os.path.isdir(scriptdir):
# Probably a project-specific tarball, in which case only this
# project's bins are included in 'bin'
scriptdir = os.path.join(basedir, 'bin')
if not os.path.isdir(scriptdir):
return []
thingies = os.listdir(scriptdir)
for specialExclusion in ['.svn', '_preamble.py', '_preamble.pyc']:
if specialExclusion in thingies:
thingies.remove(specialExclusion)
return filter(os.path.isfile,
[os.path.join(scriptdir, x) for x in thingies])
## Helpers and distutil tweaks
class build_scripts_twisted(build_scripts.build_scripts):
"""
Renames scripts so they end with '.py' on Windows.
"""
def run(self):
build_scripts.build_scripts.run(self)
if not os.name == "nt":
return
for f in os.listdir(self.build_dir):
fpath = os.path.join(self.build_dir, f)
if not fpath.endswith(".py"):
pypath = fpath + ".py"
if os.path.exists(pypath):
os.unlink(pypath)
os.rename(fpath, pypath)
class install_data_twisted(install_data.install_data):
"""
I make sure data files are installed in the package directory.
"""
def finalize_options(self):
self.set_undefined_options('install',
('install_lib', 'install_dir')
)
install_data.install_data.finalize_options(self)
class build_ext_twisted(build_ext.build_ext):
"""
Allow subclasses to easily detect and customize Extensions to
build at install-time.
"""
def prepare_extensions(self):
"""
Prepare the C{self.extensions} attribute (used by
L{build_ext.build_ext}) by checking which extensions in
L{conditionalExtensions} should be built. In addition, if we are
building on NT, define the WIN32 macro to 1.
"""
# always define WIN32 under Windows
if os.name == 'nt':
self.define_macros = [("WIN32", 1)]
else:
self.define_macros = []
# On Solaris 10, we need to define the _XOPEN_SOURCE and
# _XOPEN_SOURCE_EXTENDED macros to build in order to gain access to
# the msg_control, msg_controllen, and msg_flags members in
# sendmsg.c. (according to
# http://stackoverflow.com/questions/1034587). See the documentation
# of X/Open CAE in the standards(5) man page of Solaris.
if sys.platform.startswith('sunos'):
self.define_macros.append(('_XOPEN_SOURCE', 1))
self.define_macros.append(('_XOPEN_SOURCE_EXTENDED', 1))
self.extensions = [x for x in self.conditionalExtensions
if x.condition(self)]
for ext in self.extensions:
ext.define_macros.extend(self.define_macros)
def build_extensions(self):
"""
Check to see which extension modules to build and then build them.
"""
self.prepare_extensions()
build_ext.build_ext.build_extensions(self)
def _remove_conftest(self):
for filename in ("conftest.c", "conftest.o", "conftest.obj"):
try:
os.unlink(filename)
except EnvironmentError:
pass
def _compile_helper(self, content):
conftest = open("conftest.c", "w")
try:
conftest.write(content)
conftest.close()
try:
self.compiler.compile(["conftest.c"], output_dir='')
except CompileError:
return False
return True
finally:
self._remove_conftest()
def _check_header(self, header_name):
"""
Check if the given header can be included by trying to compile a file
that contains only an #include line.
"""
self.compiler.announce("checking for %s ..." % header_name, 0)
return self._compile_helper("#include <%s>\n" % header_name)
def _checkCPython(sys=sys, platform=platform):
"""
Checks if this implementation is CPython.
This uses C{platform.python_implementation}.
This takes C{sys} and C{platform} kwargs that by default use the real
modules. You shouldn't care about these -- they are for testing purposes
only.
@return: C{False} if the implementation is definitely not CPython, C{True}
otherwise.
"""
return platform.python_implementation() == "CPython"
_isCPython = _checkCPython()
|
DEKHTIARJonathan/BilletterieUTC
|
refs/heads/master
|
badgingServer/Install/swigwin-3.0.7/Examples/test-suite/python/inplaceadd_runme.py
|
4
|
import inplaceadd
a = inplaceadd.A(7)
a += 5
if a.val != 12:
print a.val
raise RuntimeError
a -= 5
if a.val != 7:
raise RuntimeError
a *= 2
if a.val != 14:
raise RuntimeError
a += a
if a.val != 28:
raise RuntimeError
|
gmorph/ardupilot
|
refs/heads/master
|
mk/PX4/Tools/genmsg/src/genmsg/__init__.py
|
215
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . base import EXT_MSG, EXT_SRV, SEP, log, plog, InvalidMsgSpec, log_verbose, MsgGenerationException
from . gentools import compute_md5, compute_full_text, compute_md5_text
from . names import resource_name_base, package_resource_name, is_legal_resource_base_name, \
resource_name_package, resource_name, is_legal_resource_name
from . msgs import HEADER, TIME, DURATION, MsgSpec, Constant, Field
from . msg_loader import MsgNotFound, MsgContext, load_depends, load_msg_by_type, load_srv_by_type
from . srvs import SrvSpec
|
rackerlabs/lunr
|
refs/heads/master
|
testlunr/functional/test_lock.py
|
2
|
#! /usr/bin/env python
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import signal
import unittest
from tempfile import mkdtemp
from shutil import rmtree
from webob.exc import HTTPConflict
from lunr.common.lock import ResourceFile
from lunr.storage.controller.base import claim, lock
class LockTest(unittest.TestCase):
def setUp(self):
self.path = mkdtemp()
self.lockfile = os.path.join(self.path, 'lock')
def tearDown(self):
rmtree(self.path)
def test_lock_used(self):
resource = ResourceFile(self.lockfile)
child = os.fork()
if not child:
# Child grabs resource and sleeps until killed.
with resource:
resource.acquire({'pid': os.getpid()})
while True:
time.sleep(5)
time.sleep(0.2)
with resource:
used = resource.used()
self.assert_(used)
self.assertEquals(used['pid'], child)
os.kill(child, signal.SIGTERM)
os.waitpid(child, 0)
class Blank(object):
pass
class FakeController(object):
def __init__(self, *args, **kwargs):
self.id = kwargs.get('id', 'fakeid')
self.volume_id = kwargs.get('volume_id', 'fake_vol_id')
self.app = Blank()
self.app.helper = Blank()
self.app.helper.volumes = Blank()
self.app.helper.volumes.run_dir = kwargs.get('run_dir', 'run_dir')
class FakeRequest(object):
def __init__(self, method='GET', path="/somewhere", **kwargs):
self.method = method
self.path = path
class StorageLockTest(unittest.TestCase):
def setUp(self):
self.path = mkdtemp()
self.lockfile = os.path.join(self.path, 'lock')
self.children = []
def tearDown(self):
rmtree(self.path)
for child in self.children:
try:
os.kill(child, signal.SIGTERM)
os.waitpid(child, 0)
except OSError:
pass
def test_interruptible_claim(self):
resource = ResourceFile(self.lockfile)
child = os.fork()
if not child:
# Child grabs resource and sleeps until killed.
with resource:
resource.acquire({'pid': os.getpid(),
'uri': 'child'})
time.sleep(2)
with resource:
resource.acquire({'interruptible': True})
while(True):
time.sleep(0.5)
else:
self.children.append(child)
# This is racy. Child is originally uninterruptible, but after a short
# sleep, he marks himself interruptible
time.sleep(1)
info = {'uri': 'parent', 'pid': os.getpid()}
self.assertRaises(HTTPConflict, claim, resource, info)
time.sleep(2)
claim(resource, info)
time.sleep(1)
# should be killed by now.
pid, status = os.waitpid(child, os.WNOHANG)
self.assertEquals(pid, child)
with resource:
used = resource.used()
self.assert_(used)
self.assertEquals(used['pid'], os.getpid())
self.assertEquals(used['uri'], 'parent')
# Bug: lock.acquire only updates the keys you give it.
# So I'm marked interruptible unknowingly.
# lunr.storage.controller.base.inspect was updated to always
# set interruptible to False because of this.
self.assertEquals(used['interruptible'], True)
def test_interruptible_lock(self):
resource = ResourceFile(self.lockfile)
fake_controller = FakeController(volume_id='foo', id='bar',
run_dir='somewhere')
req_1 = FakeRequest(method='PUT', path="something")
req_2 = FakeRequest(method='PUT', path="something/else")
@lock(self.lockfile)
def killable_func(obj, req, resource):
with resource:
resource.update({'interruptible': True})
while True:
time.sleep(1)
@lock(self.lockfile)
def killing_func(obj, req, resource):
while True:
time.sleep(1)
# Go killable func child!
child1 = os.fork()
if not child1:
killable_func(fake_controller, req_1)
else:
self.children.append(child1)
time.sleep(1)
with resource:
used = resource.used()
self.assertEquals(used['interruptible'], True)
self.assertEquals(used['uri'], 'PUT something')
self.assertEquals(used['pid'], child1)
# Go killing func child!
child2 = os.fork()
if not child2:
killing_func(fake_controller, req_2)
else:
self.children.append(child2)
time.sleep(1)
with resource:
used = resource.used()
self.assertEquals(used['interruptible'], False)
self.assertEquals(used['uri'], 'PUT something/else')
self.assertEquals(used['pid'], child2)
if __name__ == "__main__":
unittest.main()
|
phw/weblate
|
refs/heads/master
|
weblate/trans/tests/test_edit.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for translation views.
"""
import time
from django.core.urlresolvers import reverse
from weblate.trans.tests.test_views import ViewTestCase
from weblate.trans.models import Change
class EditTest(ViewTestCase):
'''
Tests for manipulating translation.
'''
has_plurals = True
def setUp(self):
super(EditTest, self).setUp()
self.translation = self.get_translation()
self.translate_url = self.translation.get_translate_url()
def test_edit(self):
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should get to second message
self.assertRedirectsOffset(response, self.translate_url, 1)
unit = self.get_unit()
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertBackend(1)
# Test that second edit with no change does not break anything
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should get to second message
self.assertRedirectsOffset(response, self.translate_url, 1)
unit = self.get_unit()
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertBackend(1)
# Test that third edit still works
response = self.edit_unit(
'Hello, world!\n',
'Ahoj svete!\n'
)
# We should get to second message
self.assertRedirectsOffset(response, self.translate_url, 1)
unit = self.get_unit()
self.assertEqual(unit.target, 'Ahoj svete!\n')
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertBackend(1)
def test_edit_locked(self):
self.subproject.locked = True
self.subproject.save()
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should get to second message
self.assertContains(
response,
'This translation is currently locked for updates!'
)
self.assertBackend(0)
def test_plurals(self):
'''
Test plural editing.
'''
if not self.has_plurals:
return
response = self.edit_unit(
'Orangutan',
u'Opice má %d banán.\n',
target_1=u'Opice má %d banány.\n',
target_2=u'Opice má %d banánů.\n',
)
# We should get to second message
self.assertRedirectsOffset(response, self.translate_url, 1)
# Check translations
unit = self.get_unit('Orangutan')
plurals = unit.get_target_plurals()
self.assertEqual(len(plurals), 3)
self.assertEqual(
plurals[0],
u'Opice má %d banán.\n',
)
self.assertEqual(
plurals[1],
u'Opice má %d banány.\n',
)
self.assertEqual(
plurals[2],
u'Opice má %d banánů.\n',
)
def test_merge(self):
# Translate unit to have something to start with
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
unit = self.get_unit()
# Try the merge
response = self.client.get(
self.translate_url,
{'checksum': unit.checksum, 'merge': unit.id}
)
self.assertBackend(1)
# We should stay on same message
self.assertRedirectsOffset(response, self.translate_url, unit.position)
# Test error handling
unit2 = self.translation.unit_set.get(
source='Thank you for using Weblate.'
)
response = self.client.get(
self.translate_url,
{'checksum': unit.checksum, 'merge': unit2.id}
)
self.assertContains(response, 'Can not merge different messages!')
def test_revert(self):
source = 'Hello, world!\n'
target = 'Nazdar svete!\n'
target_2 = 'Hei maailma!\n'
self.edit_unit(
source,
target
)
# Ensure other edit gets different timestamp
time.sleep(1)
self.edit_unit(
source,
target_2
)
unit = self.get_unit()
changes = Change.objects.content().filter(unit=unit)
self.assertEqual(changes[1].target, target)
self.assertEqual(changes[0].target, target_2)
self.assertBackend(1)
# revert it
self.client.get(
self.translate_url,
{'checksum': unit.checksum, 'revert': changes[1].id}
)
unit = self.get_unit()
self.assertEqual(unit.target, target)
# check that we cannot revert to string from another translation
self.edit_unit(
'Thank you for using Weblate.',
'Kiitoksia Weblaten kaytosta.'
)
unit2 = self.get_unit(
source='Thank you for using Weblate.'
)
change = Change.objects.filter(unit=unit2)[0]
response = self.client.get(
self.translate_url,
{'checksum': unit.checksum, 'revert': change.id}
)
self.assertContains(response, "Can not revert to different unit")
self.assertBackend(2)
def test_edit_message(self):
# Save with failing check
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!',
commit_message='Fixing issue #666',
)
# We should get to second message
self.assertRedirectsOffset(response, self.translate_url, 1)
        # Did the commit message get stored?
translation = self.get_translation()
self.assertEqual(
'Fixing issue #666',
translation.commit_message
)
        # Try committing
translation.commit_pending(self.get_request('/'))
def test_edit_fixup(self):
# Save with failing check
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!'
)
# We should get to second message
self.assertRedirectsOffset(response, self.translate_url, 1)
unit = self.get_unit()
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertFalse(unit.has_failing_check)
self.assertEqual(len(unit.checks()), 0)
self.assertEqual(len(unit.active_checks()), 0)
self.assertEqual(unit.translation.failing_checks, 0)
self.assertBackend(1)
def test_edit_check(self):
# Save with failing check
response = self.edit_unit(
'Hello, world!\n',
'Hello, world!\n',
)
# We should stay on current message
self.assertRedirectsOffset(response, self.translate_url, 0)
unit = self.get_unit()
self.assertEqual(unit.target, 'Hello, world!\n')
self.assertTrue(unit.has_failing_check)
self.assertEqual(len(unit.checks()), 1)
self.assertEqual(len(unit.active_checks()), 1)
self.assertEqual(unit.translation.failing_checks, 1)
# Ignore check
check_id = unit.checks()[0].id
response = self.client.get(
reverse('js-ignore-check', kwargs={'check_id': check_id})
)
self.assertContains(response, 'ok')
# Should have one less check
unit = self.get_unit()
self.assertFalse(unit.has_failing_check)
self.assertEqual(len(unit.checks()), 1)
self.assertEqual(len(unit.active_checks()), 0)
self.assertEqual(unit.translation.failing_checks, 0)
# Save with no failing checks
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should stay on current message
self.assertRedirectsOffset(response, self.translate_url, 1)
unit = self.get_unit()
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertFalse(unit.has_failing_check)
self.assertEqual(len(unit.checks()), 0)
self.assertEqual(unit.translation.failing_checks, 0)
self.assertBackend(1)
def test_commit_push(self):
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should get to second message
self.assertRedirectsOffset(response, self.translate_url, 1)
self.assertTrue(self.translation.repo_needs_commit())
self.assertTrue(self.subproject.repo_needs_commit())
self.assertTrue(self.subproject.project.repo_needs_commit())
self.translation.commit_pending(self.get_request('/'))
self.assertFalse(self.translation.repo_needs_commit())
self.assertFalse(self.subproject.repo_needs_commit())
self.assertFalse(self.subproject.project.repo_needs_commit())
self.assertTrue(self.translation.repo_needs_push())
self.assertTrue(self.subproject.repo_needs_push())
self.assertTrue(self.subproject.project.repo_needs_push())
self.translation.do_push(self.get_request('/'))
self.assertFalse(self.translation.repo_needs_push())
self.assertFalse(self.subproject.repo_needs_push())
self.assertFalse(self.subproject.project.repo_needs_push())
def test_fuzzy(self):
'''
Test for fuzzy flag handling.
'''
unit = self.get_unit()
self.assertFalse(unit.fuzzy)
self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n',
fuzzy='yes'
)
unit = self.get_unit()
self.assertTrue(unit.fuzzy)
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertFalse(unit.has_failing_check)
self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n',
)
unit = self.get_unit()
self.assertFalse(unit.fuzzy)
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertFalse(unit.has_failing_check)
self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n',
fuzzy='yes'
)
unit = self.get_unit()
self.assertTrue(unit.fuzzy)
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertFalse(unit.has_failing_check)
class EditResourceTest(EditTest):
has_plurals = False
def create_subproject(self):
return self.create_android()
class EditResourceSourceTest(ViewTestCase):
"""Source strings (template) editing."""
has_plurals = False
def test_edit(self):
translation = self.get_translation()
translate_url = translation.get_translate_url()
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should get to second message
self.assertRedirectsOffset(response, translate_url, 1)
unit = self.get_unit('Nazdar svete!\n')
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertBackend(4)
def get_translation(self):
return self.subproject.translation_set.get(
language_code='en'
)
def create_subproject(self):
return self.create_android()
class EditMercurialTest(EditTest):
def create_subproject(self):
return self.create_po_mercurial()
class EditPoMonoTest(EditTest):
def create_subproject(self):
return self.create_po_mono()
class EditIphoneTest(EditTest):
has_plurals = False
def create_subproject(self):
return self.create_iphone()
class EditJSONTest(EditTest):
has_plurals = False
def create_subproject(self):
return self.create_json()
class EditJSONMonoTest(EditTest):
has_plurals = False
def create_subproject(self):
return self.create_json_mono()
class EditJavaTest(EditTest):
has_plurals = False
def create_subproject(self):
return self.create_java()
class EditXliffTest(EditTest):
has_plurals = False
def create_subproject(self):
return self.create_xliff()
class EditXliffMonoTest(EditTest):
has_plurals = False
def create_subproject(self):
return self.create_xliff_mono()
class EditLinkTest(EditTest):
def create_subproject(self):
return self.create_link()
class EditTSTest(EditTest):
def create_subproject(self):
return self.create_ts()
class EditTSMonoTest(EditTest):
has_plurals = False
def create_subproject(self):
return self.create_ts_mono()
class ZenViewTest(ViewTestCase):
def test_zen(self):
response = self.client.get(
reverse('zen', kwargs=self.kw_translation)
)
self.assertContains(
response,
'Thank you for using Weblate.'
)
self.assertContains(
response,
'Orangutan has %d bananas'
)
self.assertContains(
response,
'You have reached end of translating.'
)
def test_zen_invalid(self):
response = self.client.get(
reverse('zen', kwargs=self.kw_translation),
{'type': 'nonexisting-type'},
follow=True
)
self.assertContains(
response,
'nonexisting-type is not one of the available choices'
)
def test_load_zen(self):
response = self.client.get(
reverse('load_zen', kwargs=self.kw_translation)
)
self.assertContains(
response,
'Thank you for using Weblate.'
)
self.assertContains(
response,
'Orangutan has %d bananas'
)
self.assertContains(
response,
'You have reached end of translating.'
)
def test_load_zen_offset(self):
response = self.client.get(
reverse('load_zen', kwargs=self.kw_translation),
{'offset': '1'}
)
self.assertNotContains(
response,
'Hello, world'
)
self.assertContains(
response,
'Orangutan has %d bananas'
)
response = self.client.get(
reverse('load_zen', kwargs=self.kw_translation),
{'offset': 'bug'}
)
self.assertContains(
response,
'Hello, world'
)
def test_save_zen(self):
unit = self.get_unit()
params = {
'checksum': unit.checksum,
'target_0': 'Zen translation'
}
response = self.client.post(
reverse('save_zen', kwargs=self.kw_translation),
params
)
self.assertContains(
response,
'Following fixups were applied to translation: '
'Trailing and leading whitespace'
)
def test_save_zen_lock(self):
self.subproject.locked = True
self.subproject.save()
unit = self.get_unit()
params = {
'checksum': unit.checksum,
'target_0': 'Zen translation'
}
response = self.client.post(
reverse('save_zen', kwargs=self.kw_translation),
params
)
self.assertContains(
response,
            "You don't have privileges to save translations!",
)
|
zeedunk/statsite
|
refs/heads/master
|
sinks/influxdb.py
|
5
|
"""
Supports flushing statsite metrics to InfluxDB
"""
import sys
import httplib, urllib, logging, json, re
##
# InfluxDB sink for statsite
# ==========================
#
# Use with the following stream command:
#
# stream_command = python sinks/influxdb.py influxdb.ini INFO
#
# The InfluxDB sink takes an INI format configuration file as a first
# argument and log level as a second argument.
# The following is an example configuration:
#
# Configuration example:
# ---------------------
#
# [influxdb]
# host = 127.0.0.1
# port = 8086
# database = dbname
# username = root
# password = root
#
#
# version = 0.9
# prefix = statsite
# timeout = 10
#
# Options:
# --------
# - version: InfluxDB version (by default 0.9)
# - prefix: A prefix to add to the keys
# - timeout: Blocking operations (like connection attempts) will time out
#            after that many seconds (if not given, the global default timeout setting is used)
###
class InfluxDBStore(object):
def __init__(self, cfg="/etc/statsite/influxdb.ini", lvl="INFO"):
"""
Implements an interface that allows metrics to be persisted to InfluxDB.
Raises a :class:`ValueError` on bad arguments or `Exception` on missing
configuration section.
:Parameters:
- `cfg`: INI configuration file.
- `lvl`: logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
"""
self.sink_name = "statsite-influxdb"
self.sink_version = "0.0.1"
self.logger = logging.getLogger("statsite.influxdb")
self.logger.setLevel(lvl)
self.load(cfg)
def load(self, cfg):
"""
Loads configuration from an INI format file.
"""
import ConfigParser
ini = ConfigParser.RawConfigParser()
ini.read(cfg)
sect = "influxdb"
if not ini.has_section(sect):
raise Exception("Can not locate config section '" + sect + "'")
if ini.has_option(sect, 'host'):
self.host = ini.get(sect, 'host')
else:
raise ValueError("host must be set in config")
if ini.has_option(sect, 'port'):
self.port = ini.get(sect, 'port')
else:
raise ValueError("port must be set in config")
if ini.has_option(sect, 'database'):
self.database = ini.get(sect, 'database')
else:
raise ValueError("database must be set in config")
if ini.has_option(sect, 'username'):
self.username = ini.get(sect, 'username')
else:
raise ValueError("username must be set in config")
if ini.has_option(sect, 'password'):
self.password = ini.get(sect, 'password')
else:
raise ValueError("password must be set in config")
self.prefix = None
if ini.has_option(sect, 'prefix'):
self.prefix = ini.get(sect, 'prefix')
self.timeout = None
if ini.has_option(sect, 'timeout'):
self.timeout = ini.get(sect, 'timeout')
self.version = '0.9'
if ini.has_option(sect, 'version'):
self.version = ini.get(sect, 'version')
def flush09(self, metrics):
"""
Flushes the metrics provided to InfluxDB v0.9+.
Parameters:
- `metrics` : A list of (key,value,timestamp) tuples.
"""
if not metrics:
return
if self.timeout:
conn = httplib.HTTPConnection(self.host, int(self.port), timeout = int(self.timeout))
else:
conn = httplib.HTTPConnection(self.host, int(self.port))
params = urllib.urlencode({'db': self.database, 'u': self.username, 'p': self.password, 'precision':'s'})
headers = {
'Content-Type': 'application/stream',
'User-Agent': 'statsite',
}
# Construct the output
metrics = [m.split("|") for m in metrics if m]
self.logger.info("Outputting %d metrics" % len(metrics))
# Serialize and send via HTTP API
# InfluxDB uses following regexp "^[a-zA-Z][a-zA-Z0-9._-]*$" to validate table/series names,
        # so disallowed characters are replaced by '.'
body = ''
for k, v, ts in metrics:
if self.prefix:
body += "%s" % re.sub(r'[^a-zA-Z0-9._-]+','.', "%s.%s" % (self.prefix, k))
else:
body += "%s" % re.sub(r'[^a-zA-Z0-9._-]+','.', k)
body += " value=" + v + " " + ts + "\n"
self.logger.debug(body)
conn.request("POST", "/write?%s" % params, body, headers)
try:
res = conn.getresponse()
self.logger.info("%s, %s" %(res.status, res.reason))
except:
            self.logger.exception('Failed to send metrics to InfluxDB')
conn.close()
def flush(self, metrics):
"""
Flushes the metrics provided to InfluxDB.
Parameters:
- `metrics` : A list of (key,value,timestamp) tuples.
"""
if not metrics:
return
if self.timeout:
conn = httplib.HTTPConnection(self.host, int(self.port), timeout = int(self.timeout))
else:
conn = httplib.HTTPConnection(self.host, int(self.port))
params = urllib.urlencode({'u': self.username, 'p': self.password, 'time_precision':'s'})
headers = {
'Content-Type': 'application/json',
'User-Agent': 'statsite',
}
# Construct the output
metrics = [m.split("|") for m in metrics if m]
self.logger.info("Outputting %d metrics" % len(metrics))
# Serialize to JSON and send via HTTP API
# InfluxDB uses following regexp "^[a-zA-Z][a-zA-Z0-9._-]*$" to validate table/series names,
        # so disallowed characters are replaced by '.'
if self.prefix:
body = json.dumps([{
"name":"%s" % re.sub(r'[^a-zA-Z0-9._-]+','.', "%s.%s" % (self.prefix, k)),
"columns":["value", "time"],
"points":[[float(v), int(ts)]]
} for k, v, ts in metrics])
else:
body = json.dumps([{
"name":"%s" % re.sub(r'[^a-zA-Z0-9._-]+','.', k),
"columns":["value", "time"],
"points":[[float(v), int(ts)]]
} for k, v, ts in metrics])
self.logger.debug(body)
conn.request("POST", "/db/%s/series?%s" % (self.database, params), body, headers)
try:
res = conn.getresponse()
self.logger.info("%s, %s" %(res.status, res.reason))
except:
            self.logger.exception('Failed to send metrics to InfluxDB')
conn.close()
def version(v):
parts = [int(x) for x in v.split(".")]
while parts[-1] == 0:
parts.pop()
return parts
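# Illustrative results from the version() helper above:
#   version("0.9")   -> [0, 9]
#   version("0.8.9") -> [0, 8, 9]
#   version("1.0")   -> [1]   (trailing zero components are dropped)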
def main(metrics, *argv):
# Initialize the logger
logging.basicConfig()
    # Initialize from our arguments
influxdb = InfluxDBStore(*argv[0:])
# Flush format depends on InfluxDB version
if cmp(version('0.9'), version(influxdb.version)) < 0:
influxdb.flush(metrics.splitlines())
else:
influxdb.flush09(metrics.splitlines())
if __name__ == "__main__":
# Get all the inputs
main(sys.stdin.read(), *sys.argv[1:])
|
codekaki/odoo
|
refs/heads/7.0
|
addons/survey/__init__.py
|
66
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
xaedes/canopen_301_402
|
refs/heads/master
|
src/canopen_301_402/__init__.py
|
12133432
| |
christian6/django-upicr
|
refs/heads/master
|
icrperu/apps/logistica/inclogistica/__init__.py
|
12133432
| |
hkariti/ansible-modules-core
|
refs/heads/devel
|
files/ini_file.py
|
91
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: ini_file
short_description: Tweak settings in INI files
extends_documentation_fragment: files
description:
- Manage (add, remove, change) individual settings in an INI-style file without having
to manage the file as a whole with, say, M(template) or M(assemble). Adds missing
sections if they don't exist.
- Comments are discarded when the source file is read, and therefore will not
show up in the destination file.
version_added: "0.9"
options:
dest:
description:
- Path to the INI-style file; this file is created if required
required: true
default: null
section:
description:
- Section name in INI file. This is added if C(state=present) automatically when
a single value is being set.
required: true
default: null
option:
description:
- if set (required for changing a I(value)), this is the name of the option.
- May be omitted if adding/removing a whole I(section).
required: false
default: null
value:
description:
- the string value to be associated with an I(option). May be omitted when removing an I(option).
required: false
default: null
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
default: "no"
choices: [ "yes", "no" ]
others:
description:
- all arguments accepted by the M(file) module also work here
required: false
notes:
- While it is possible to add an I(option) without specifying a I(value), this makes
no sense.
- A section named C(default) cannot be added by the module, but if it exists, individual
options within the section can be updated. (This is a limitation of Python's I(ConfigParser).)
Either use M(template) to create a base INI file with a C([default]) section, or use
M(lineinfile) to add the missing line.
requirements: [ ConfigParser ]
author: "Jan-Piet Mens (@jpmens)"
'''
EXAMPLES = '''
# Ensure "fav=lemonade is in section "[drinks]" in specified file
- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes
- ini_file: dest=/etc/anotherconf
section=drinks
option=temperature
value=cold
backup=yes
'''
import ConfigParser
import sys
# ==============================================================
# do_ini
def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False):
changed = False
if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3:
cp = ConfigParser.ConfigParser(allow_no_value=True)
else:
cp = ConfigParser.ConfigParser()
cp.optionxform = identity
try:
f = open(filename)
cp.readfp(f)
except IOError:
pass
if state == 'absent':
if option is None and value is None:
if cp.has_section(section):
cp.remove_section(section)
changed = True
else:
if option is not None:
try:
if cp.get(section, option):
cp.remove_option(section, option)
changed = True
except ConfigParser.InterpolationError:
cp.remove_option(section, option)
changed = True
except:
pass
if state == 'present':
# DEFAULT section is always there by DEFAULT, so never try to add it.
if not cp.has_section(section) and section.upper() != 'DEFAULT':
cp.add_section(section)
changed = True
if option is not None and value is not None:
try:
oldvalue = cp.get(section, option)
if str(value) != str(oldvalue):
cp.set(section, option, value)
changed = True
except ConfigParser.NoSectionError:
cp.set(section, option, value)
changed = True
except ConfigParser.NoOptionError:
cp.set(section, option, value)
changed = True
except ConfigParser.InterpolationError:
cp.set(section, option, value)
changed = True
if changed and not module.check_mode:
if backup:
module.backup_local(filename)
try:
f = open(filename, 'w')
cp.write(f)
except:
module.fail_json(msg="Can't create %s" % filename)
return changed
# ==============================================================
# identity
def identity(arg):
"""
This function simply returns its argument. It serves as a
replacement for ConfigParser.optionxform, which by default
changes arguments to lower case. The identity function is a
better choice than str() or unicode(), because it is
encoding-agnostic.
"""
return arg
# ==============================================================
# main
def main():
module = AnsibleModule(
argument_spec = dict(
dest = dict(required=True),
section = dict(required=True),
option = dict(required=False),
value = dict(required=False),
backup = dict(default='no', type='bool'),
state = dict(default='present', choices=['present', 'absent'])
),
add_file_common_args = True,
supports_check_mode = True
)
info = dict()
dest = os.path.expanduser(module.params['dest'])
section = module.params['section']
option = module.params['option']
value = module.params['value']
state = module.params['state']
backup = module.params['backup']
changed = do_ini(module, dest, section, option, value, state, backup)
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Mission complete
module.exit_json(dest=dest, changed=changed, msg="OK")
# import module snippets
from ansible.module_utils.basic import *
main()
|
waytai/odoo
|
refs/heads/8.0
|
addons/l10n_gt/__openerp__.py
|
260
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009-2010 Soluciones Tecnologócias Prisma S.A. All Rights Reserved.
# José Rodrigo Fernández Menegazzo, Soluciones Tecnologócias Prisma S.A.
# (http://www.solucionesprisma.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# This module provides a minimal Guatemalan chart of accounts that can be used
# to build upon a more complex one. It also includes a chart of taxes and
# the Quetzal currency.
#
# This module is based on the UK minimal chart of accounts:
# Copyright (c) 2004-2009 Seath Solutions Ltd. All Rights Reserved.
# Geoff Gardiner, Seath Solutions Ltd (http://www.seathsolutions.com/)
#
# This module works with OpenERP 6.0
#
{
'name': 'Guatemala - Accounting',
'version': '3.0',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Guatemala.
=====================================================================
Agrega una nomenclatura contable para Guatemala. También incluye impuestos y
la moneda del Quetzal. -- Adds accounting chart for Guatemala. It also includes
taxes and the Quetzal currency.""",
'author': 'José Rodrigo Fernández Menegazzo',
'website': 'http://solucionesprisma.com/',
'depends': ['base', 'account', 'account_chart'],
'data': [
'account_types.xml',
'account_chart.xml',
'account_tax.xml',
'l10n_gt_base.xml',
],
'demo': [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
amimoto/walky
|
refs/heads/master
|
tests/walkydata/generate-self-signed-certs.py
|
1
|
from OpenSSL import crypto, SSL
from socket import gethostname
from pprint import pprint
from time import gmtime, mktime
CERT_FILE = "ssl.crt"
KEY_FILE = "ssl.key"
def create_self_signed_cert():
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().C = "CA"
cert.get_subject().ST = "BC"
cert.get_subject().L = "Vancouver"
cert.get_subject().O = "Self Signed Madness"
cert.get_subject().OU = "Self Signyness"
cert.get_subject().CN = gethostname()
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10*365*24*60*60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha1')
open(CERT_FILE, "wt").write(
crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
open(KEY_FILE, "wt").write(
crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
create_self_signed_cert()
|
kslundberg/pants
|
refs/heads/master
|
tests/python/pants_test/net/http/test_fetcher.py
|
14
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import closing
import mox
import pytest
import requests
from six import StringIO
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_file
class FetcherTest(mox.MoxTestBase):
def setUp(self):
super(FetcherTest, self).setUp()
self.requests = self.mox.CreateMockAnything()
self.response = self.mox.CreateMock(requests.Response)
self.fetcher = Fetcher(requests_api=self.requests)
self.listener = self.mox.CreateMock(Fetcher.Listener)
def expect_get(self, url, chunk_size_bytes, timeout_secs, listener=True):
self.requests.get(url, stream=True, timeout=timeout_secs).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
if listener:
self.listener.status(200, content_length=11)
chunks = ['0123456789', 'a']
self.response.iter_content(chunk_size=chunk_size_bytes).AndReturn(chunks)
return chunks
def test_get(self):
for chunk in self.expect_get('http://bar', chunk_size_bytes=1024, timeout_secs=60):
self.listener.recv_chunk(chunk)
self.listener.finished()
self.response.close()
self.mox.ReplayAll()
self.fetcher.fetch('http://bar',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_checksum_listener(self):
digest = self.mox.CreateMockAnything()
for chunk in self.expect_get('http://baz', chunk_size_bytes=1, timeout_secs=37):
self.listener.recv_chunk(chunk)
digest.update(chunk)
self.listener.finished()
digest.hexdigest().AndReturn('42')
self.response.close()
self.mox.ReplayAll()
checksum_listener = Fetcher.ChecksumListener(digest=digest)
self.fetcher.fetch('http://baz',
checksum_listener.wrap(self.listener),
chunk_size_bytes=1,
timeout_secs=37)
self.assertEqual('42', checksum_listener.checksum)
def test_download_listener(self):
downloaded = ''
for chunk in self.expect_get('http://foo', chunk_size_bytes=1048576, timeout_secs=3600):
self.listener.recv_chunk(chunk)
downloaded += chunk
self.listener.finished()
self.response.close()
self.mox.ReplayAll()
with closing(StringIO()) as fp:
self.fetcher.fetch('http://foo',
Fetcher.DownloadListener(fp).wrap(self.listener),
chunk_size_bytes=1024 * 1024,
timeout_secs=60 * 60)
self.assertEqual(downloaded, fp.getvalue())
def test_size_mismatch(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
self.listener.status(200, content_length=11)
self.response.iter_content(chunk_size=1024).AndReturn(['a', 'b'])
self.listener.recv_chunk('a')
self.listener.recv_chunk('b')
self.response.close()
self.mox.ReplayAll()
with pytest.raises(self.fetcher.Error):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_get_error_transient(self):
self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.ConnectionError)
self.mox.ReplayAll()
with pytest.raises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_get_error_permanent(self):
self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.TooManyRedirects)
self.mox.ReplayAll()
with pytest.raises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertTrue(e.value.response_code is None)
def test_http_error(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 404
self.listener.status(404)
self.response.close()
self.mox.ReplayAll()
with pytest.raises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertEqual(404, e.value.response_code)
def test_iter_content_error(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {}
self.listener.status(200, content_length=None)
self.response.iter_content(chunk_size=1024).AndRaise(requests.Timeout)
self.response.close()
self.mox.ReplayAll()
with pytest.raises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def expect_download(self, path_or_fd=None):
downloaded = ''
for chunk in self.expect_get('http://1', chunk_size_bytes=13, timeout_secs=13, listener=False):
downloaded += chunk
self.response.close()
self.mox.ReplayAll()
path = self.fetcher.download('http://1',
path_or_fd=path_or_fd,
chunk_size_bytes=13,
timeout_secs=13)
return downloaded, path
def test_download(self):
downloaded, path = self.expect_download()
try:
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
finally:
os.unlink(path)
def test_download_fd(self):
with temporary_file() as fd:
downloaded, path = self.expect_download(path_or_fd=fd)
self.assertEqual(path, fd.name)
fd.close()
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
def test_download_path(self):
with temporary_file() as fd:
fd.close()
downloaded, path = self.expect_download(path_or_fd=fd.name)
self.assertEqual(path, fd.name)
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
|
ajlopez/PythonSharp
|
refs/heads/master
|
Src/PythonSharp.Tests/Examples/complex.py
|
1
|
class Complex:
def __init__(self, realpart, imagpart):
self.r = realpart
self.i = imagpart
x = Complex(3.0, 4.5)
|
ibinti/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/core/handlers/modpython.py
|
189
|
import os
from pprint import pformat
import sys
from warnings import warn
from django import http
from django.core import signals
from django.core.handlers.base import BaseHandler
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, smart_str, iri_to_uri
from django.utils.log import getLogger
logger = getLogger('django.request')
# NOTE: do *not* import settings (or any module which eventually imports
# settings) until after ModPythonHandler has been called; otherwise os.environ
# won't be set up correctly (with respect to settings).
class ModPythonRequest(http.HttpRequest):
def __init__(self, req):
self._req = req
# FIXME: This isn't ideal. The request URI may be encoded (it's
# non-normalized) slightly differently to the "real" SCRIPT_NAME
# and PATH_INFO values. This causes problems when we compute path_info,
# below. For now, don't use script names that will be subject to
# encoding/decoding.
self.path = force_unicode(req.uri)
root = req.get_options().get('django.root', '')
self.django_root = root
# req.path_info isn't necessarily computed correctly in all
# circumstances (it's out of mod_python's control a bit), so we use
# req.uri and some string manipulations to get the right value.
if root and req.uri.startswith(root):
self.path_info = force_unicode(req.uri[len(root):])
else:
self.path_info = self.path
if not self.path_info:
# Django prefers empty paths to be '/', rather than '', to give us
# a common start character for URL patterns. So this is a little
# naughty, but also pretty harmless.
self.path_info = u'/'
self._post_parse_error = False
self._stream = self._req
self._read_started = False
def __repr__(self):
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = pformat(self.GET)
except:
get = '<could not parse>'
if self._post_parse_error:
post = '<could not parse>'
else:
try:
post = pformat(self.POST)
except:
post = '<could not parse>'
try:
cookies = pformat(self.COOKIES)
except:
cookies = '<could not parse>'
try:
meta = pformat(self.META)
except:
meta = '<could not parse>'
return smart_str(u'<ModPythonRequest\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
(self.path, unicode(get), unicode(post),
unicode(cookies), unicode(meta)))
def get_full_path(self):
# RFC 3986 requires self._req.args to be in the ASCII range, but this
# doesn't always happen, so rather than crash, we defensively encode it.
return '%s%s' % (self.path, self._req.args and ('?' + iri_to_uri(self._req.args)) or '')
def is_secure(self):
try:
return self._req.is_https()
except AttributeError:
# mod_python < 3.2.10 doesn't have req.is_https().
return self._req.subprocess_env.get('HTTPS', '').lower() in ('on', '1')
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
self._get = http.QueryDict(self._req.args, encoding=self._encoding)
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self._req.headers_in.get('cookie', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
def _get_meta(self):
"Lazy loader that returns self.META dictionary"
if not hasattr(self, '_meta'):
self._meta = {
'AUTH_TYPE': self._req.ap_auth_type,
'CONTENT_LENGTH': self._req.headers_in.get('content-length', 0),
'CONTENT_TYPE': self._req.headers_in.get('content-type'),
'GATEWAY_INTERFACE': 'CGI/1.1',
'PATH_INFO': self.path_info,
'PATH_TRANSLATED': None, # Not supported
'QUERY_STRING': self._req.args,
'REMOTE_ADDR': self._req.connection.remote_ip,
'REMOTE_HOST': None, # DNS lookups not supported
'REMOTE_IDENT': self._req.connection.remote_logname,
'REMOTE_USER': self._req.user,
'REQUEST_METHOD': self._req.method,
'SCRIPT_NAME': self.django_root,
'SERVER_NAME': self._req.server.server_hostname,
'SERVER_PORT': self._req.connection.local_addr[1],
'SERVER_PROTOCOL': self._req.protocol,
'SERVER_SOFTWARE': 'mod_python'
}
for key, value in self._req.headers_in.items():
key = 'HTTP_' + key.upper().replace('-', '_')
self._meta[key] = value
return self._meta
def _get_method(self):
return self.META['REQUEST_METHOD'].upper()
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
META = property(_get_meta)
REQUEST = property(_get_request)
method = property(_get_method)
class ModPythonHandler(BaseHandler):
request_class = ModPythonRequest
def __call__(self, req):
warn(('The mod_python handler is deprecated; use a WSGI or FastCGI server instead.'),
PendingDeprecationWarning)
# mod_python fakes the environ, and thus doesn't process SetEnv. This fixes that
os.environ.update(req.subprocess_env)
# now that the environ works we can see the correct settings, so imports
# that use settings now can work
from django.conf import settings
# now that settings work, we can set up the middleware here if needed.
if self._request_middleware is None:
self.load_middleware()
set_script_prefix(req.get_options().get('django.root', ''))
signals.request_started.send(sender=self.__class__)
try:
try:
request = self.request_class(req)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError): %s' % request.path,
exc_info=sys.exc_info(),
extra={
'status_code': 400,
'request': request
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
finally:
signals.request_finished.send(sender=self.__class__)
# Convert our custom HttpResponse object back into the mod_python req.
req.content_type = response['Content-Type']
for key, value in response.items():
if key != 'content-type':
req.headers_out[str(key)] = str(value)
for c in response.cookies.values():
req.headers_out.add('Set-Cookie', c.output(header=''))
req.status = response.status_code
try:
for chunk in response:
req.write(chunk)
finally:
response.close()
return 0 # mod_python.apache.OK
def handler(req):
# mod_python hooks into this function.
return ModPythonHandler()(req)
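# Editor's note: an illustrative deployment sketch (not part of the original
# module). The Apache directives below belong in httpd.conf, not in Python,
# and the location and settings-module names are placeholders; they only show
# how mod_python ends up invoking handler(req) defined above.
#
#   <Location "/mysite">
#       SetHandler python-program
#       PythonHandler django.core.handlers.modpython
#       SetEnv DJANGO_SETTINGS_MODULE mysite.settings
#       PythonDebug Off
#   </Location>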
|
grigoriy-chirkov/mipt-mips-2015
|
refs/heads/master
|
libs/gtest-1.6.0/test/gtest_output_test.py
|
1733
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
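# Editor's note: a small illustrative sketch (not part of the original test
# suite) of the location normalization described in RemoveLocations' docstring.
# The sample string below is hypothetical; it only assumes the 'FILE:LINE: '
# failure prefix format that Google Test emits.
def _editor_demo_remove_locations():
  sample = 'gtest/src/gtest_output_test_.cc:123: Failure\n'
  # Both the directory prefix and the line number are replaced, leaving 'FILE:#: '.
  assert RemoveLocations(sample) == 'gtest_output_test_.cc:#: Failure\n'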
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
# A mis-configured source control system can cause \r to appear in EOL
# sequences when we read the golden file irrespective of an operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual)
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
|
infobloxopen/neutron
|
refs/heads/master
|
neutron/db/common_db_mixin.py
|
13
|
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import weakref
from sqlalchemy import sql
from neutron.common import exceptions as n_exc
from neutron.db import sqlalchemyutils
class CommonDbMixin(object):
"""Common methods used in core and service plugins."""
# Plugins, mixin classes implementing extension will register
# hooks into the dict below for "augmenting" the "core way" of
# building a query for retrieving objects from a model class.
# To this aim, the register_model_query_hook and unregister_query_hook
# from this class should be invoked
_model_query_hooks = {}
# This dictionary will store methods for extending attributes of
# api resources. Mixins can use this dict for adding their own methods
# TODO(salvatore-orlando): Avoid using class-level variables
_dict_extend_functions = {}
@classmethod
def register_model_query_hook(cls, model, name, query_hook, filter_hook,
result_filters=None):
"""Register a hook to be invoked when a query is executed.
Add the hooks to the _model_query_hooks dict. Models are the keys
of this dict, whereas the value is another dict mapping hook names to
callables performing the hook.
Each hook has a "query" component, used to build the query expression
and a "filter" component, which is used to build the filter expression.
Query hooks take as input the query being built and return a
transformed query expression.
Filter hooks take as input the filter expression being built and return
a transformed filter expression.
"""
cls._model_query_hooks.setdefault(model, {})[name] = {
'query': query_hook, 'filter': filter_hook,
'result_filters': result_filters}
@classmethod
def register_dict_extend_funcs(cls, resource, funcs):
cls._dict_extend_functions.setdefault(resource, []).extend(funcs)
@property
def safe_reference(self):
"""Return a weakref to the instance.
Minimize the potential for the instance persisting
unnecessarily in memory by returning a weakref proxy that
won't prevent deallocation.
"""
return weakref.proxy(self)
def model_query_scope(self, context, model):
# NOTE(jkoelker) non-admin queries are scoped to their tenant_id
# NOTE(salvatore-orlando): unless the model allows for shared objects
# NOTE(mestery): Or the user has the advsvc role
return ((not context.is_admin and hasattr(model, 'tenant_id')) and
(not context.is_advsvc and hasattr(model, 'tenant_id')))
def _model_query(self, context, model):
query = context.session.query(model)
# define basic filter condition for model query
query_filter = None
if self.model_query_scope(context, model):
if hasattr(model, 'shared'):
query_filter = ((model.tenant_id == context.tenant_id) |
(model.shared == sql.true()))
else:
query_filter = (model.tenant_id == context.tenant_id)
# Execute query hooks registered from mixins and plugins
for _name, hooks in self._model_query_hooks.get(model,
{}).iteritems():
query_hook = hooks.get('query')
if isinstance(query_hook, basestring):
query_hook = getattr(self, query_hook, None)
if query_hook:
query = query_hook(context, model, query)
filter_hook = hooks.get('filter')
if isinstance(filter_hook, basestring):
filter_hook = getattr(self, filter_hook, None)
if filter_hook:
query_filter = filter_hook(context, model, query_filter)
# NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the
# condition, raising an exception
if query_filter is not None:
query = query.filter(query_filter)
return query
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
return resource
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = _('Cannot create resource for another tenant')
raise n_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_by_id(self, context, model, id):
query = self._model_query(context, model)
return query.filter(model.id == id).one()
def _apply_filters_to_query(self, query, model, filters):
if filters:
for key, value in filters.iteritems():
column = getattr(model, key, None)
if column:
if not value:
query = query.filter(sql.false())
return query
query = query.filter(column.in_(value))
for _name, hooks in self._model_query_hooks.get(model,
{}).iteritems():
result_filter = hooks.get('result_filters', None)
if isinstance(result_filter, basestring):
result_filter = getattr(self, result_filter, None)
if result_filter:
query = result_filter(query, filters)
return query
def _apply_dict_extend_functions(self, resource_type,
response, db_object):
for func in self._dict_extend_functions.get(
resource_type, []):
args = (response, db_object)
if isinstance(func, basestring):
func = getattr(self, func, None)
else:
# must call unbound method - use self as 1st argument
args = (self,) + args
if func:
func(*args)
def _get_collection_query(self, context, model, filters=None,
sorts=None, limit=None, marker_obj=None,
page_reverse=False):
collection = self._model_query(context, model)
collection = self._apply_filters_to_query(collection, model, filters)
if limit and page_reverse and sorts:
sorts = [(s[0], not s[1]) for s in sorts]
collection = sqlalchemyutils.paginate_query(collection, model, limit,
sorts,
marker_obj=marker_obj)
return collection
def _get_collection(self, context, model, dict_func, filters=None,
fields=None, sorts=None, limit=None, marker_obj=None,
page_reverse=False):
query = self._get_collection_query(context, model, filters=filters,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [dict_func(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
def _get_collection_count(self, context, model, filters=None):
return self._get_collection_query(context, model, filters).count()
def _get_marker_obj(self, context, resource, limit, marker):
if limit and marker:
return getattr(self, '_get_%s' % resource)(context, marker)
return None
def _filter_non_model_columns(self, data, model):
"""Remove all the attributes from data which are not columns of
the model passed as second parameter.
"""
columns = [c.name for c in model.__table__.columns]
return dict((k, v) for (k, v) in
data.iteritems() if k in columns)
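# Editor's note: an illustrative sketch (not part of the original module) of
# the hook registration described in register_model_query_hook's docstring.
# _DemoModel and the two hook callables are hypothetical placeholders; real
# plugins register SQLAlchemy models and their own methods here.
def _editor_demo_register_hook():
    class _DemoModel(object):
        pass
    def _demo_query_hook(context, model, query):
        # A real hook would join extra tables or add columns to the query.
        return query
    def _demo_filter_hook(context, model, query_filter):
        # A real hook would AND extra conditions onto the filter expression.
        return query_filter
    CommonDbMixin.register_model_query_hook(
        _DemoModel, 'demo_hook', _demo_query_hook, _demo_filter_hook)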
|
Laurawly/tvm-1
|
refs/heads/master
|
python/tvm/auto_scheduler/dispatcher.py
|
1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The global context that dispatches best schedules to workloads.
In auto-scheduler, a state (loop_state.py::StateObject) saves the
schedule configuration by its transform_steps, so a state is used
as a schedule configuration here.
"""
# pylint: disable=invalid-name
import logging
import pathlib
import numpy as np
from tvm.contrib.utils import tempdir
from tvm.tir.expr import FloatImm
from .cost_model import RandomModel, XGBModel
from .measure import LocalRPCMeasureContext
from .measure_record import RecordToFile, load_records
from .search_policy import PreloadMeasuredStates, SketchPolicy
from .search_task import SearchTask, TuningOptions
from .utils import calc_workload_dis_factor, decode_workload_key
logger = logging.getLogger("auto_scheduler")
class DispatchContext(object):
"""
Base class of dispatch context.
"""
current = None
def __init__(self):
self._old_ctx = DispatchContext.current
def query(self, target, workload_key, has_complex_op, dag):
"""
Query the context to get the specific config for a workload.
If the result cannot be found inside this context, this function will query it
from the upper contexts.
Parameters
----------
target: Target
The current target
workload_key : str
The workload key
has_complex_op: bool
Whether this workload has at least one complex op.
dag: ComputeDAG
The ComputeDAG of the workload.
Returns
-------
state : StateObject
The state that stores schedule configuration for the workload
"""
ret = self._query_inside(target, workload_key)
if ret is None:
ret = self._old_ctx.query(target, workload_key, has_complex_op, dag)
return ret
def update(self, target, workload_key, state):
"""
Update the config for a workload
Parameters
----------
target: Target
The current target
workload_key : str
The current workload_key.
state : StateObject
The state that stores schedule configuration for the workload
"""
raise NotImplementedError()
def _query_inside(self, target, workload_key):
"""
Query the context to get the specific config for a workload.
This function only queries the config inside this context.
Parameters
----------
target: Target
The current target
workload_key : str
The current workload_key.
Returns
-------
state : StateObject
The schedule configuration for the workload
"""
raise NotImplementedError()
def __enter__(self):
self._old_ctx = DispatchContext.current
DispatchContext.current = self
return self
def __exit__(self, ptype, value, trace):
DispatchContext.current = self._old_ctx
class ApplyHistoryBest(DispatchContext):
"""
Apply the history best config
Parameters
----------
records : str or iterator of (auto_scheduler.measure.MeasureInput,\
auto_scheduler.measure.MeasureResult)
Collection of tuning records.
If it is a str, then it should be the filename of a records log file.
Each row of this file is an encoded record pair. Otherwise, it is an iterator.
n_lines: Optional[int]
if it is not None, only load the first `n_lines` lines of log.
include_compatible: bool
When set to True, compatible records will also be considered.
"""
def __init__(self, records, n_lines=None, include_compatible=False):
super(ApplyHistoryBest, self).__init__()
self.include_compatible = include_compatible
# Dict[str (target key),
# Dict[str (workload hash),
# Dict[tuple (workload args), tuple (State, cost)]]]
self.best_by_targetkey = {}
self.best_by_model = {}
self._best_user_defined = {}
self.load(records, n_lines)
@staticmethod
def get_workload_entry(best_records, target_key, workload_key):
"""Get the entry of the target key and workload key hash in the given best record map.
Parameters
----------
best_records: Dict[str, Dict[str, Dict[str, Any]]]
The best record map.
target_key: str
The first key to the best_records.
workload_key: str
The workload key that can be decoded to workload hash and args.
Returns
-------
entry: Dict[str, Any]
The entry in best_records with target key and workload hash.
workload_hash: str
The workload hash decoded from workload_key.
workload_args: Tuple[Any, ...]
The hashable tuple of workload args decoded from workload_key.
"""
workload_hash, workload_args = decode_workload_key(workload_key)
if target_key not in best_records:
best_records[target_key] = {}
if workload_hash not in best_records[target_key]:
best_records[target_key][workload_hash] = {}
return best_records[target_key][workload_hash], workload_hash, workload_args
def load(self, records, n_lines=None):
"""Load records to this dispatch context
Parameters
----------
records : str or iterator of (auto_scheduler.measure.MeasureInput,\
auto_scheduler.measure.MeasureResult)
Collection of tuning records.
If it is a str, then it should be the filename of a records log file.
Each row of this file is an encoded record pair. Otherwise, it is an iterator.
n_lines: Optional[int]
if it is not None, only load the first `n_lines` lines of log
"""
if isinstance(records, pathlib.Path):
records = str(records)
if isinstance(records, str):
records = load_records(records)
if not records:
return
best_by_targetkey = self.best_by_targetkey
best_by_model = self.best_by_model
counter = 0
for inp, res in records:
if n_lines is not None and counter >= n_lines:
break
counter += 1
if res.error_no != 0:
continue
costs = [x.value for x in res.costs if isinstance(x, FloatImm)]
cost = np.mean(costs)
# use target keys in tvm target system as key to build best map
for k in inp.task.target.keys:
entry, _, workload_args = self.get_workload_entry(
best_by_targetkey, k, inp.task.workload_key
)
if workload_args not in entry:
entry[workload_args] = (inp.state, cost)
else:
_, other_cost = entry[workload_args]
if other_cost > cost:
entry[workload_args] = (inp.state, cost)
# use model as key to build best map
entry, _, workload_args = self.get_workload_entry(
best_by_model, inp.task.target.model, inp.task.workload_key
)
if workload_args not in entry:
if inp.task.target.model != "unknown":
entry[workload_args] = (inp.state, cost)
else:
_, other_cost = entry[workload_args]
if other_cost > cost:
entry[workload_args] = (inp.state, cost)
logger.debug("Finish loading %d records", counter)
def _query_inside(self, target, workload_key):
if target is None:
raise RuntimeError(
"Need a target context to find the history best. "
"Hint: If your target is llvm, use `with tvm.target.create('llvm'):`"
" above the dispatcher call. So does other target. "
)
def match_record(best_records, target_key, workload_key):
"""The helper function to match the record in the given map
and return the matched state, or None if no match.
"""
ret = None
entry, workload_hash, workload_args = self.get_workload_entry(
best_records, target_key, workload_key
)
if workload_args in entry:
ret = entry[workload_args][0]
elif self.include_compatible:
best_cost = float("inf")
for args, val in entry.items():
dis_f = calc_workload_dis_factor(
(workload_hash, workload_args), (workload_hash, args)
)
if dis_f == float("inf"):
continue
state, cost = val
cost *= dis_f
if ret is None or cost < best_cost:
best_cost = cost
ret = state
return ret
# first try matching by model
ret = match_record(self._best_user_defined, target.model, workload_key)
if ret is not None:
return ret
ret = match_record(self.best_by_model, target.model, workload_key)
if ret is not None:
return ret
# then try matching by target key
for k in target.keys:
ret = match_record(self._best_user_defined, k, workload_key)
if ret is not None:
return ret
ret = match_record(self.best_by_targetkey, k, workload_key)
if ret is not None:
return ret
return None
def update(self, target, workload_key, state):
entry, _, workload_args = self.get_workload_entry(
self._best_user_defined, target.model, workload_key
)
entry[workload_args] = (state, 1)
for k in target.keys:
entry, _, _ = self.get_workload_entry(self._best_user_defined, k, workload_key)
entry[workload_args] = (state, 1)
class ApplyHistoryBestOrSample(ApplyHistoryBest):
"""
Apply the history best config, or sample a valid schedule if no config is found.
Parameters
----------
records : str or iterator of (auto_scheduler.measure.MeasureInput,\
auto_scheduler.measure.MeasureResult)
Collection of tuning records.
If it is a str, then it should be the filename of a records log file.
Each row of this file is an encoded record pair. Otherwise, it is an iterator.
sample_simple_workloads: bool
When False, sampling will not apply to simple workloads (w/o reduction).
cost_model_file: str
The filename of the pre-trained XGBoost cost model. If not present, then random
model will be used.
num_measure: int
Measure the top-N ranked sampled schedules on the device. The default -1 means
no measurement; it simply returns the top-1 schedule ranked by the cost model.
"""
def __init__(
self, records, sample_simple_workloads=False, cost_model_file=None, num_measure=-1
):
self.sample_simple_workloads = sample_simple_workloads
self.num_measure = num_measure
self.log_dir = tempdir()
if cost_model_file is None:
self.cost_model = RandomModel()
else:
self.cost_model = XGBModel()
self.cost_model.load(cost_model_file)
super(ApplyHistoryBestOrSample, self).__init__(
records, n_lines=None, include_compatible=True
)
def query(self, target, workload_key, has_complex_op, dag):
if has_complex_op or self.sample_simple_workloads:
ret = self._query_inside(target, workload_key)
else:
ret = super(ApplyHistoryBestOrSample, self)._query_inside(target, workload_key)
if ret is None:
ret = self._old_ctx.query(target, workload_key, has_complex_op, dag)
return ret
def _query_inside(self, target, workload_key):
ret = super(ApplyHistoryBestOrSample, self)._query_inside(target, workload_key)
if ret is not None:
return ret
# Sampling valid schedules when no existing records can be used.
task = SearchTask(workload_key=workload_key, target=target)
measure_ctx = LocalRPCMeasureContext(min_repeat_ms=300)
log_file = self.log_dir.relpath("%s.log" % decode_workload_key(workload_key)[0])
while ret is None:
tune_option = TuningOptions(
num_measure_trials=self.num_measure,
runner=measure_ctx.runner,
measure_callbacks=[RecordToFile(log_file)],
verbose=0,
)
search_policy = SketchPolicy(
task,
self.cost_model,
params={
"eps_greedy": 0.01,
"sample_init_min_population": 64,
"evolutionary_search_num_iters": 0,
},
init_search_callbacks=[PreloadMeasuredStates(log_file)],
verbose=0,
)
task.tune(tune_option, search_policy)
# Load the sampled records and query again.
self.load(log_file)
ret = super(ApplyHistoryBestOrSample, self)._query_inside(target, workload_key)
del measure_ctx
return ret
class FallbackContext(DispatchContext):
"""
A fallback dispatch context.
This is used as the root context.
"""
def __init__(self):
super(FallbackContext, self).__init__()
self.memory = {}
# Verbose level:
# 0: Completely silent.
# 1: Warn about missing configs when querying complex tasks.
# 2: Warn about missing configs when querying all tasks.
self.verbose = 1
# a set to prevent print duplicated message
self.messages = set()
def query(self, target, workload_key, has_complex_op, dag):
key = (str(target), workload_key)
if key in self.memory:
return self.memory[key]
if self.verbose == 2 or (has_complex_op and self.verbose == 1):
msg = (
"-----------------------------------\n"
"Cannot find tuned schedules for target=%s, workload_key=%s. "
"A fallback TOPI schedule is used, "
"which may bring great performance regression or even compilation failure. "
"Compute DAG info:\n%s" % (target, workload_key, dag)
)
if msg not in self.messages:
self.messages.add(msg)
logger.warning(msg)
state = None
# cache this config to avoid duplicated warning message
self.memory[key] = state
return state
def _query_inside(self, target, workload_key):
_ = target = workload_key
raise RuntimeError("This function should never be called")
def update(self, target, workload_key, state):
key = (str(target), workload_key)
self.memory[key] = state
DispatchContext.current = FallbackContext()
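# Editor's note: an illustrative sketch (not part of the original module) of
# how a dispatch context is typically installed, per the DispatchContext
# docstrings above. "my_records.json" is a hypothetical tuning-log filename.
def _editor_demo_apply_history_best():
    # Entering the context makes it DispatchContext.current; queries that find
    # no record fall through to the previous context (ultimately FallbackContext).
    with ApplyHistoryBest("my_records.json", include_compatible=True):
        pass  # e.g. compile the tuned workload here (tvm.relay.build(...))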
|
genome/flow-core
|
refs/heads/master
|
flow/shell_command/petri_net/actions.py
|
1
|
from flow.petri_net.actions.merge import BasicMergeAction
import logging
LOG = logging.getLogger(__name__)
class ShellCommandDispatchAction(BasicMergeAction):
place_refs = [
"msg: dispatch_failure",
"msg: dispatch_success",
"msg: execute_begin",
"msg: execute_failure",
"msg: execute_success",
]
required_arguments = place_refs
# Hooks that subclasses can override
def command_line(self, net, token_data):
return self.args['command_line']
# Private methods
def _response_places(self):
return {x: self.args[x] for x in self.place_refs}
def executor_data(self, net, color_descriptor, token_data, response_places):
executor_data = {}
executor_data['command_line'] = self.command_line(net, token_data)
umask = net.constant('umask')
if umask:
executor_data['umask'] = int(umask)
self.set_io_files(net, executor_data, token_data)
return executor_data
def callback_data(self, net, color_descriptor, response_places):
result = {
u'net_key': net.key,
u'color': color_descriptor.color,
u'color_group_idx': color_descriptor.group.idx,
}
result.update(response_places)
return result
def set_io_files(self, net, executor_data, token_data):
if 'stderr' in self.args:
executor_data['stderr'] = self.args['stderr']
if 'stdin' in self.args:
executor_data['stdin'] = self.args['stdin']
if 'stdout' in self.args:
executor_data['stdout'] = self.args['stdout']
def base_message_params(self, net, color_descriptor):
params = {
'user_id': int(net.constant('user_id')),
'group_id': int(net.constant('group_id')),
'working_directory': net.constant('working_directory', '/tmp'),
}
params['environment'] = self.environment(net, color_descriptor)
return params
def environment(self, net, color_descriptor):
return net.constant('environment', {})
def execute(self, net, color_descriptor, active_tokens, service_interfaces):
tokens, deferred = BasicMergeAction.execute(self, net,
color_descriptor, active_tokens, service_interfaces)
response_places = self._response_places()
# BasicMergeAction returns exactly one token
token_data = tokens[0].data
service = service_interfaces[self.service_name]
deferred.addCallback(lambda x: service.submit(
callback_data=self.callback_data(net,
color_descriptor, response_places),
executor_data=self.executor_data(net, color_descriptor,
token_data, response_places),
**self.base_message_params(net, color_descriptor)))
return tokens, deferred
class LSFDispatchAction(ShellCommandDispatchAction):
service_name = "lsf"
def executor_data(self, net, color_descriptor, token_data, response_places):
executor_data = ShellCommandDispatchAction.executor_data(self, net,
color_descriptor, token_data, response_places)
executor_data['resources'] = self.args.get('resources', {})
if 'lsf_options' in self.args:
executor_data['lsf_options'] = self.args['lsf_options']
executor_data.update(self.callback_data(net,
color_descriptor, response_places))
return executor_data
class ForkDispatchAction(ShellCommandDispatchAction):
service_name = "fork"
|
jnewland/home-assistant
|
refs/heads/ci
|
homeassistant/components/raincloud/sensor.py
|
7
|
"""Support for Melnor RainCloud sprinkler water timer."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.icon import icon_for_battery_level
from . import DATA_RAINCLOUD, ICON_MAP, SENSORS, RainCloudEntity
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
vol.All(cv.ensure_list, [vol.In(SENSORS)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a sensor for a raincloud device."""
raincloud = hass.data[DATA_RAINCLOUD].data
sensors = []
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
if sensor_type == 'battery':
sensors.append(
RainCloudSensor(raincloud.controller.faucet,
sensor_type))
else:
# create a sensor for each zone managed by a faucet
for zone in raincloud.controller.faucet.zones:
sensors.append(RainCloudSensor(zone, sensor_type))
add_entities(sensors, True)
return True
class RainCloudSensor(RainCloudEntity):
"""A sensor implementation for raincloud device."""
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Updating RainCloud sensor: %s", self._name)
if self._sensor_type == 'battery':
self._state = self.data.battery
else:
self._state = getattr(self.data, self._sensor_type)
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self._sensor_type == 'battery' and self._state is not None:
return icon_for_battery_level(battery_level=int(self._state),
charging=False)
return ICON_MAP.get(self._sensor_type)
|
SungEun-Steve-Kim/test-mp
|
refs/heads/master
|
tests/bytecode/mp-tests/augassign1.py
|
22
|
[] = ()
x += 1
x.y += 1
x.f().y += 1
x[1] += 2
|
Celedhrim/persomov
|
refs/heads/master
|
libs/caper/__init__.py
|
81
|
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logr import Logr
from caper.matcher import FragmentMatcher
from caper.objects import CaperFragment, CaperClosure
from caper.parsers.anime import AnimeParser
from caper.parsers.scene import SceneParser
from caper.parsers.usenet import UsenetParser
__version_info__ = ('0', '3', '1')
__version_branch__ = 'master'
__version__ = "%s%s" % (
'.'.join(__version_info__),
'-' + __version_branch__ if __version_branch__ else ''
)
CL_START_CHARS = ['(', '[', '<', '>']
CL_END_CHARS = [')', ']', '<', '>']
CL_END_STRINGS = [' - ']
STRIP_START_CHARS = ''.join(CL_START_CHARS)
STRIP_END_CHARS = ''.join(CL_END_CHARS)
STRIP_CHARS = ''.join(['_', ' ', '.'])
FRAGMENT_SEPARATORS = ['.', '-', '_', ' ']
CL_START = 0
CL_END = 1
class Caper(object):
def __init__(self, debug=False):
self.debug = debug
self.parsers = {
'anime': AnimeParser,
'scene': SceneParser,
'usenet': UsenetParser
}
def _closure_split(self, name):
"""
:type name: str
:rtype: list of CaperClosure
"""
closures = []
def end_closure(closures, buf):
buf = buf.strip(STRIP_CHARS)
if len(buf) < 2:
return
cur = CaperClosure(len(closures), buf)
cur.left = closures[len(closures) - 1] if len(closures) > 0 else None
if cur.left:
cur.left.right = cur
closures.append(cur)
state = CL_START
buf = ""
for x, ch in enumerate(name):
# Check for start characters
if state == CL_START and ch in CL_START_CHARS:
end_closure(closures, buf)
state = CL_END
buf = ""
buf += ch
if state == CL_END and ch in CL_END_CHARS:
# End character found, create the closure
end_closure(closures, buf)
state = CL_START
buf = ""
elif state == CL_START and buf[-3:] in CL_END_STRINGS:
# End string found, create the closure
end_closure(closures, buf[:-3])
state = CL_START
buf = ""
end_closure(closures, buf)
return closures
def _clean_closure(self, closure):
"""
:type closure: str
:rtype: str
"""
return closure.lstrip(STRIP_START_CHARS).rstrip(STRIP_END_CHARS)
def _fragment_split(self, closures):
"""
:type closures: list of CaperClosure
:rtype: list of CaperClosure
"""
cur_position = 0
cur = None
def end_fragment(fragments, cur, cur_position):
cur.position = cur_position
cur.left = fragments[len(fragments) - 1] if len(fragments) > 0 else None
if cur.left:
cur.left_sep = cur.left.right_sep
cur.left.right = cur
cur.right_sep = ch
fragments.append(cur)
for closure in closures:
closure.fragments = []
separator_buffer = ""
for x, ch in enumerate(self._clean_closure(closure.value)):
if not cur:
cur = CaperFragment(closure)
if ch in FRAGMENT_SEPARATORS:
if cur.value:
separator_buffer = ""
separator_buffer += ch
if cur.value or not closure.fragments:
end_fragment(closure.fragments, cur, cur_position)
elif len(separator_buffer) > 1:
cur.value = separator_buffer.strip()
if cur.value:
end_fragment(closure.fragments, cur, cur_position)
separator_buffer = ""
# Reset
cur = None
cur_position += 1
else:
cur.value += ch
# Finish parsing the last fragment
if cur and cur.value:
end_fragment(closure.fragments, cur, cur_position)
# Reset
cur_position = 0
cur = None
return closures
def parse(self, name, parser='scene'):
closures = self._closure_split(name)
closures = self._fragment_split(closures)
# Print closures
for closure in closures:
Logr.debug("closure [%s]", closure.value)
for fragment in closure.fragments:
Logr.debug("\tfragment [%s]", fragment.value)
if parser not in self.parsers:
raise ValueError("Unknown parser")
# TODO autodetect the parser type
return self.parsers[parser](self.debug).run(closures)
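# Editor's note: an illustrative sketch (not part of the original module) of
# the typical entry point, Caper.parse. The release name below is an arbitrary
# example; the parser argument defaults to 'scene'.
def _editor_demo_parse():
    caper = Caper(debug=False)
    return caper.parse("Show.Name.S01E01.720p.HDTV.x264-GROUP")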
|
huawenyu/Design-Patterns-in-C
|
refs/heads/master
|
auto-gen/tools/comn.py
|
1
|
def get_value_else_default(rdict, key, def_val):
rval = rdict.get(key, def_val)
if not rval:
rval = def_val
return rval
def parse_parameters(params_str):
params = params_str.split(',')
args = []
for one_param in params:
find = False
for idx in range(len(one_param)-1, 0, -1):
one_char = one_param[idx]
if one_char >= 'a' and one_char <= 'z' or \
one_char >= 'A' and one_char <= 'Z' or \
one_char >= '0' and one_char <= '9' or \
one_char == '_':
pass
else:
find = True
args.append(one_param[(idx+1):])
break
return params,args
def convert_to_class(myclasses_array_dict, class_name):
if myclasses_array_dict.has_key(class_name) and len(myclasses_array_dict[class_name]) > 0:
return myclasses_array_dict[class_name];
else:
raise Exception('class *{0}* not exist'.format(class_name))
|
Bloodyaugust/sugarlabcppboilerplate
|
refs/heads/master
|
lib/boost/libs/python/test/calling_conventions_mf.py
|
12
|
# Copyright Nicolas Lelong, 2010. Distributed under the Boost
# Software License, Version 1.0 (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from calling_conventions_mf_ext import *
>>> x = X__cdecl()
>>> x.f0()
>>> x.g0()
>>> x.f1(1)
>>> x.g1(1)
>>> x.f2(1, 2)
>>> x.g2(1, 2)
>>> x.f3(1, 2, 3)
>>> x.g3(1, 2, 3)
>>> x.f4(1, 2, 3, 4)
>>> x.g4(1, 2, 3, 4)
>>> x.f5(1, 2, 3, 4, 5)
>>> x.g5(1, 2, 3, 4, 5)
>>> x.f6(1, 2, 3, 4, 5, 6)
>>> x.g6(1, 2, 3, 4, 5, 6)
>>> x.f7(1, 2, 3, 4, 5, 6, 7)
>>> x.g7(1, 2, 3, 4, 5, 6, 7)
>>> x.f8(1, 2, 3, 4, 5, 6, 7, 8)
>>> x.g8(1, 2, 3, 4, 5, 6, 7, 8)
>>> x.hash
2155
>>> x = X__stdcall()
>>> x.f0()
>>> x.g0()
>>> x.f1(1)
>>> x.g1(1)
>>> x.f2(1, 2)
>>> x.g2(1, 2)
>>> x.f3(1, 2, 3)
>>> x.g3(1, 2, 3)
>>> x.f4(1, 2, 3, 4)
>>> x.g4(1, 2, 3, 4)
>>> x.f5(1, 2, 3, 4, 5)
>>> x.g5(1, 2, 3, 4, 5)
>>> x.f6(1, 2, 3, 4, 5, 6)
>>> x.g6(1, 2, 3, 4, 5, 6)
>>> x.f7(1, 2, 3, 4, 5, 6, 7)
>>> x.g7(1, 2, 3, 4, 5, 6, 7)
>>> x.f8(1, 2, 3, 4, 5, 6, 7, 8)
>>> x.g8(1, 2, 3, 4, 5, 6, 7, 8)
>>> x.hash
2155
>>> x = X__fastcall()
>>> x.f0()
>>> x.g0()
>>> x.f1(1)
>>> x.g1(1)
>>> x.f2(1, 2)
>>> x.g2(1, 2)
>>> x.f3(1, 2, 3)
>>> x.g3(1, 2, 3)
>>> x.f4(1, 2, 3, 4)
>>> x.g4(1, 2, 3, 4)
>>> x.f5(1, 2, 3, 4, 5)
>>> x.g5(1, 2, 3, 4, 5)
>>> x.f6(1, 2, 3, 4, 5, 6)
>>> x.g6(1, 2, 3, 4, 5, 6)
>>> x.f7(1, 2, 3, 4, 5, 6, 7)
>>> x.g7(1, 2, 3, 4, 5, 6, 7)
>>> x.f8(1, 2, 3, 4, 5, 6, 7, 8)
>>> x.g8(1, 2, 3, 4, 5, 6, 7, 8)
>>> x.hash
2155
"""
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
|
eformat/vertx-web
|
refs/heads/master
|
vertx-web/src/test/sockjs-protocol/venv/lib/python2.7/site-packages/setuptools/command/register.py
|
475
|
from distutils.command.register import register as _register
class register(_register):
__doc__ = _register.__doc__
def run(self):
# Make sure that we are using valid current name/version info
self.run_command('egg_info')
_register.run(self)
|
ProjectSWGCore/NGECore2
|
refs/heads/master
|
scripts/object/tangible/wearables/bracelet/item_bracelet_r_set_officer_utility_b_01_01.py
|
2
|
import sys
def setup(core, object):
object.setStfFilename('static_item_n')
object.setStfName('item_bracelet_r_set_officer_utility_b_01_01')
object.setDetailFilename('static_item_d')
object.setDetailName('item_bracelet_r_set_officer_utility_b_01_01')
object.setStringAttribute('class_required', 'Officer')
object.setIntAttribute('required_combat_level', 85)
object.setIntAttribute('cat_skill_mod_bonus.@stat_n:expertise_action_line_of_aoe', 2)
object.setIntAttribute('cat_skill_mod_bonus.@stat_n:expertise_freeshot_of_aoe', 2)
object.setStringAttribute('@set_bonus:piece_bonus_count_3', '@set_bonus:set_bonus_officer_utility_b_1')
object.setStringAttribute('@set_bonus:piece_bonus_count_4', '@set_bonus:set_bonus_officer_utility_b_2')
object.setStringAttribute('@set_bonus:piece_bonus_count_5', '@set_bonus:set_bonus_officer_utility_b_3')
object.setAttachment('setBonus', 'set_bonus_officer_utility_b')
return
|
gurneyalex/OpenUpgrade
|
refs/heads/master
|
addons/web_tests_demo/__init__.py
|
423
|
from openerp.osv import orm, fields
class TestObject(orm.TransientModel):
_name = 'web_tests_demo.model'
_columns = {
'name': fields.char("Name", required=True),
'thing': fields.char("Thing"),
'other': fields.char("Other", required=True)
}
_defaults = {
'other': "bob"
}
|
sknepneklab/SAMoS
|
refs/heads/development
|
analysis/batch_nematic/batch_analyze_nematic_R16b.py
|
1
|
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Integrator code for batch processing of full data runs (incorporating parts of earlier analysis scripts)
# Data interfacing
from read_data import *
from read_param import *
# Pre-existing analysis scripts
from nematic_analysis import *
#from glob import glob
# This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automatize ...)
basefolder='/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#basefolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
#outfolder= '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
outfolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#v0val=['0.3','0.5','0.7','1.5','2.0','3.0','7.0','10.0']
v0val=['2.0','3.0','7.0','10.0']
sigma=1
rval=['16.0']
nstep=10100000
nsave=5000
nsnap=int(nstep/nsave)
#skip=835
skip=1500
for r in rval:
for v0 in v0val:
#param = Param(basefolder)
files = sorted(glob(basefolder+'R_'+ r+ '/v0_' + v0 + '/sphere_*.dat'))[skip:]
defects=np.zeros((len(files),12))
ndefect=np.zeros((len(files),1))
u=0
for f in files:
print f
outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_data' + str(u)+'.vtk'
defects0,ndefect0=getDefects(f,float(r),sigma,outname,False,True)
defects[u,0:3]=defects0[0,:]
defects[u,3:6]=defects0[1,:]
defects[u,6:9]=defects0[2,:]
defects[u,9:12]=defects0[3,:]
ndefect[u]=ndefect0
outname = '.'.join((f).split('.')[:-1]) + '_defects.vtk'
outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_defects' + str(u)+'.vtk'
print outname
writeDefects(defects0,ndefect0,outname)
u+=1
#outfile2=outfolder + 'defects_v0_' + v0 + '_R_'+ r+ '.dat'
#np.savetxt(outfile2,np.concatenate((ndefect,defects),axis=1),fmt='%12.6g', header='ndefect defects')
|
deepmind/spriteworld
|
refs/heads/master
|
spriteworld/tasks.py
|
1
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tasks for Spriteworld.
Each class in this file defines a task; namely, it contains a reward function and a
success function for Spriteworld.
The reward function maps an iterable of sprites to a float. The success function
maps an iterable of sprites to a bool.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from sklearn import metrics
@six.add_metaclass(abc.ABCMeta)
class AbstractTask(object):
"""Abstract class from which all tasks should inherit."""
@abc.abstractmethod
def reward(self, sprites):
"""Compute reward for the given configuration of sprites.
This reward is evaluated per-step by the Spriteworld environment. See
Environment.step() in environment.py for usage. Hence if this is a smooth
function the agent will have shaped reward. Sparse rewards awarded only at
the end of an episode can be implemented by returning non-zero reward only
for a desired goal configuration of sprites (see sub-classes below for
examples).
Args:
sprites: Iterable of sprite instances.
Returns:
Float reward for the given configuration of sprites.
"""
@abc.abstractmethod
def success(self, sprites):
"""Compute whether the task has been successfully solved.
Args:
sprites: Iterable of sprite instances.
Returns:
Boolean. Whether or not the given configuration of sprites successfully
solves the task.
"""
class NoReward(AbstractTask):
"""Used for environments that have no task. Reward is always 0."""
def __init__(self):
pass
def reward(self, unused_sprites):
"""Calculate reward from sprites."""
return 0.0
def success(self, unused_sprites):
return False
class FindGoalPosition(AbstractTask):
"""Used for tasks that require moving some sprites to a goal position."""
def __init__(self,
filter_distrib=None,
goal_position=(0.5, 0.5),
terminate_distance=0.05,
terminate_bonus=0.0,
weights_dimensions=(1, 1),
sparse_reward=False,
raw_reward_multiplier=50):
"""Construct goal-finding task.
This task rewards the agent for bringing all sprites with factors contained
in a filter distribution to a goal position. Rewards are offset to be
negative, except for a termination bonus when the goal is reached.
Args:
filter_distrib: None or instance of
factor_distributions.AbstractDistribution. If None, all sprites must be
brought to the goal position. If not None, only sprites with factors
contained in this distribution must be brought to the goal position.
goal_position: Position of the goal.
terminate_distance: Distance from goal position at which to clip reward.
If all sprites are within this distance, terminate episode.
terminate_bonus: Extra bonus for getting all sprites within
terminate_distance.
weights_dimensions: Weights modifying the contributions of the (x,
y)-dimensions to the distance to goal computation.
sparse_reward: Boolean (default False), whether to provide dense rewards
or only reward at the end of an episode.
raw_reward_multiplier: Multiplier for the reward to be applied before
terminate_bonus. Empirically, 50 seems to be a good value.
"""
self._filter_distrib = filter_distrib
self._goal_position = np.asarray(goal_position)
self._terminate_bonus = terminate_bonus
self._terminate_distance = terminate_distance
self._sparse_reward = sparse_reward
self._weights_dimensions = np.asarray(weights_dimensions)
self._raw_reward_multiplier = raw_reward_multiplier
def _single_sprite_reward(self, sprite):
goal_distance = np.sum(self._weights_dimensions *
(sprite.position - self._goal_position)**2.)**0.5
raw_reward = self._terminate_distance - goal_distance
return self._raw_reward_multiplier * raw_reward
def _filtered_sprites_rewards(self, sprites):
"""Returns list of rewards for the filtered sprites."""
rewards = [
self._single_sprite_reward(s) for s in sprites if
self._filter_distrib is None or self._filter_distrib.contains(s.factors)
]
return rewards
def reward(self, sprites):
"""Calculate total reward summed over filtered sprites."""
reward = 0.
rewards = self._filtered_sprites_rewards(sprites)
if not rewards: # No sprites get through the filter, so make reward NaN
return np.nan
dense_reward = np.sum(rewards)
if all(np.array(rewards) >= 0): # task succeeded
reward += self._terminate_bonus
reward += dense_reward
elif not self._sparse_reward:
reward += dense_reward
return reward
def success(self, sprites):
return all(np.array(self._filtered_sprites_rewards(sprites)) >= 0)
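# Worked example (added for illustration, not part of the original library):
# with goal_position=(0.5, 0.5), terminate_distance=0.05,
# weights_dimensions=(1, 1) and raw_reward_multiplier=50, a sprite at
# (0.5, 0.58) lies 0.08 from the goal, giving a per-sprite reward of
# 50 * (0.05 - 0.08) = -1.5, while a sprite at (0.5, 0.52) lies 0.02 away and
# gives 50 * (0.05 - 0.02) = +1.5. success() holds exactly when every filtered
# sprite has a non-negative per-sprite reward, i.e. sits within
# terminate_distance of the goal.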
class Clustering(AbstractTask):
"""Task for cluster by color/shape conditions."""
def __init__(self,
cluster_distribs,
termination_threshold=2.5,
terminate_bonus=0.0,
sparse_reward=False,
reward_range=10):
"""Reward depends on clustering sprites based on color/shape.
We indicate what feature matters for the clustering with the list of
cluster distribs. We can then compute intra-extra pairwise distances and use
the Davies-Bouldin clustering metric.
See https://en.wikipedia.org/wiki/Cluster_analysis#Internal_evaluation for
some discussion about different metrics.
Args:
cluster_distribs: list of factor distributions defining the clusters.
termination_threshold: Threshold that the metric should pass to terminate
an episode. Default of 2.5 seems to work well for 2 or 3 clusters.
terminate_bonus: Extra bonus upon task success.
sparse_reward: Boolean (default False), whether to provide dense shaping
rewards or just the sparse ones at the end of an episode.
reward_range: Scalar, specifies range [-reward_range, 0] we remap the
rewards to whenever possible.
"""
self._cluster_distribs = cluster_distribs
self._num_clusters = len(cluster_distribs)
self._termination_threshold = termination_threshold
self._terminate_bonus = terminate_bonus
self._sparse_reward = sparse_reward
self._reward_range = reward_range
def _cluster_assignments(self, sprites):
"""Return index of cluster for all sprites."""
clusters = -np.ones(len(sprites), dtype='int')
for i, sprite in enumerate(sprites):
for c_i, distrib in enumerate(self._cluster_distribs):
if distrib.contains(sprite.factors):
clusters[i] = c_i
break
return clusters
def _compute_clustering_metric(self, sprites):
"""Compute the different clustering metrics, higher should be better."""
# Get positions of sprites, and their cluster assignments
cluster_assignments = self._cluster_assignments(sprites)
positions = np.array([sprite.position for sprite in sprites])
# Ignore objects unassigned to any cluster
positions = positions[cluster_assignments >= 0]
cluster_assignments = cluster_assignments[cluster_assignments >= 0]
return 1. / metrics.davies_bouldin_score(positions, cluster_assignments)
def reward(self, sprites):
"""Calculate reward from sprites.
Recommendation: Use Davies-Bouldin, with termination_threshold left to auto.
Args:
sprites: list of Sprites.
Returns:
Reward, high when clustering is good.
"""
reward = 0.
metric = self._compute_clustering_metric(sprites)
# Low DB index is better clustering
dense_reward = (metric -
self._termination_threshold) * self._reward_range / 2.
if metric >= self._termination_threshold: # task succeeded
reward += self._terminate_bonus
reward += dense_reward
elif not self._sparse_reward:
reward += dense_reward
return reward
def success(self, sprites):
metric = self._compute_clustering_metric(sprites)
return metric >= self._termination_threshold
class MetaAggregated(AbstractTask):
"""Combines several tasks together."""
REWARD_AGGREGATOR = {
'sum': np.nansum,
'max': np.nanmax,
'min': np.nanmin,
'mean': np.nanmean
}
TERMINATION_CRITERION = {'all': np.all, 'any': np.any}
def __init__(self,
subtasks,
reward_aggregator='sum',
termination_criterion='all',
terminate_bonus=0.0):
"""MetaTasks which combines rewards between several subtasks.
Args:
subtasks: Iterable of Tasks.
reward_aggregator: (string) how to combine rewards together. One of
('sum', 'max', 'min', 'mean').
termination_criterion: (string) how to decide when to terminate, given
subtasks' termination signals. One of ('all', 'any')
terminate_bonus: Extra bonus for solving all subtasks, combined with
termination_criterion.
"""
if reward_aggregator not in MetaAggregated.REWARD_AGGREGATOR:
raise ValueError('Unknown reward_aggregator. {} not in {}'.format(
reward_aggregator, MetaAggregated.REWARD_AGGREGATOR))
if termination_criterion not in MetaAggregated.TERMINATION_CRITERION:
raise ValueError('Unknown termination_criterion. {} not in {}'.format(
termination_criterion, MetaAggregated.TERMINATION_CRITERION))
self._subtasks = subtasks
self._reward_aggregator = MetaAggregated.REWARD_AGGREGATOR[
reward_aggregator]
self._termination_criterion = MetaAggregated.TERMINATION_CRITERION[
termination_criterion]
self._terminate_bonus = terminate_bonus
def reward(self, sprites):
rewards = self._reward_aggregator(
[task.reward(sprites) for task in self._subtasks])
rewards += self._terminate_bonus * self.success(sprites)
return rewards
def success(self, sprites):
return self._termination_criterion(
[task.success(sprites) for task in self._subtasks])
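if __name__ == '__main__':
  # Minimal usage sketch, added for illustration only (not part of the original
  # library). It assumes a sprite only needs to expose `position` (array-like)
  # and `factors` (dict-like), which is all the tasks above access; the real
  # Spriteworld Sprite class provides both.
  import collections
  FakeSprite = collections.namedtuple('FakeSprite', ['position', 'factors'])
  sprites = [
      FakeSprite(position=np.array([0.52, 0.5]), factors={}),
      FakeSprite(position=np.array([0.1, 0.9]), factors={}),
  ]
  # Combine two goal-finding subtasks: the total reward is the sum of the
  # subtask rewards, and the episode succeeds only if every subtask succeeds.
  task = MetaAggregated(
      subtasks=[
          FindGoalPosition(goal_position=(0.5, 0.5)),
          FindGoalPosition(goal_position=(0.1, 0.9)),
      ],
      reward_aggregator='sum',
      termination_criterion='all')
  print('reward: {}'.format(task.reward(sprites)))
  print('success: {}'.format(task.success(sprites)))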
|
google/mirandum
|
refs/heads/master
|
alerts/donations/migrations/0002_toplist.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('donations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TopList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('count', models.IntegerField(default=1)),
('format', models.CharField(default=b'[[name]]: [[currencysymbol]][[amount]]', max_length=1000)),
('seperator', models.CharField(default=b', ', max_length=100)),
('font', models.CharField(max_length=255, null=True, blank=True)),
('font_size', models.CharField(max_length=255, null=True, blank=True)),
('font_color', models.CharField(max_length=255, null=True, blank=True)),
('days', models.IntegerField(null=True, blank=True)),
('type', models.CharField(default=b'session', max_length=50, choices=[(b'session', b'Session'), (b'limited', b'Number of Days'), (b'alltime', b'All Time')])),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
|
krzychb/rtd-test-bed
|
refs/heads/master
|
components/esptool_py/esptool/test/test_imagegen.py
|
2
|
#!/usr/bin/env python
import os
import os.path
import subprocess
import struct
import sys
import unittest
import hashlib
from elftools.elf.elffile import ELFFile
TEST_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "elf2image")
os.chdir(TEST_DIR)
try:
ESPTOOL_PY = os.environ["ESPTOOL_PY"]
except KeyError:
ESPTOOL_PY = os.path.join(TEST_DIR, "../..", "esptool.py")
# import the version of esptool we are testing with
sys.path.append(os.path.dirname(ESPTOOL_PY))
import esptool
def try_delete(path):
try:
os.remove(path)
except OSError:
pass
def segment_matches_section(segment, section):
""" segment is an ImageSegment from an esptool binary.
section is an elftools ELF section
Returns True if they match
"""
sh_size = (section.header.sh_size + 0x3) & ~3 # pad length of ELF sections
return (section.header.sh_addr == segment.addr
and sh_size == len(segment.data))
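# Note on the padding above (added for clarity): image segment lengths are
# rounded up to 4-byte multiples, so e.g. an ELF section with sh_size 0x1d
# compares equal to a 0x20-byte image segment, since (0x1d + 0x3) & ~3 == 0x20.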
class BaseTestCase(unittest.TestCase):
def assertEqualHex(self, expected, actual, message=None):
try:
expected = hex(expected)
except TypeError: # if expected is character
expected = hex(ord(expected))
try:
actual = hex(actual)
except TypeError: # if actual is character
actual = hex(ord(actual))
self.assertEqual(expected, actual, message)
def assertImageContainsSection(self, image, elf, section_name):
"""
Assert an esptool binary image object contains
the data for a particular ELF section.
"""
with open(elf, "rb") as f:
e = ELFFile(f)
section = e.get_section_by_name(section_name)
self.assertTrue(section, "%s should be in the ELF" % section_name)
sh_addr = section.header.sh_addr
data = section.data()
# section contents may be smeared across multiple image segments,
# so look through each segment and remove it from ELF section 'data'
# as we find it in the image segments. When we're done 'data' should
# all be accounted for
for seg in sorted(image.segments, key=lambda s:s.addr):
print("comparing seg 0x%x sec 0x%x len 0x%x" % (seg.addr, sh_addr, len(data)))
if seg.addr == sh_addr:
overlap_len = min(len(seg.data), len(data))
self.assertEqual(data[:overlap_len], seg.data[:overlap_len],
"ELF '%s' section has mis-matching binary image data" % section_name)
sh_addr += overlap_len
data = data[overlap_len:]
# no bytes in 'data' should be left unmatched
self.assertEqual(0, len(data),
"ELF %s section '%s' has no encompassing segment(s) in binary image (image segments: %s)"
% (elf, section_name, image.segments))
def assertImageInfo(self, binpath, chip="esp8266"):
"""
Run esptool.py image_info on a binary file,
assert no red flags about contents.
"""
cmd = [ sys.executable, ESPTOOL_PY, "--chip", chip, "image_info", binpath ]
try:
output = subprocess.check_output(cmd).decode("utf-8")
print(output)
except subprocess.CalledProcessError as e:
print(e.output)
raise
self.assertFalse("invalid" in output, "Checksum calculation should be valid")
self.assertFalse("warning" in output.lower(), "Should be no warnings in image_info output")
def run_elf2image(self, chip, elf_path, version=None, extra_args=[]):
""" Run elf2image on elf_path """
cmd = [sys.executable, ESPTOOL_PY, "--chip", chip, "elf2image" ]
if version is not None:
cmd += [ "--version", str(version) ]
cmd += [ elf_path ] + extra_args
print("Executing %s" % (" ".join(cmd)))
try:
output = str(subprocess.check_output(cmd))
print(output)
self.assertFalse("warning" in output.lower(), "elf2image should not output warnings")
except subprocess.CalledProcessError as e:
print(e.output)
raise
class ESP8266V1ImageTests(BaseTestCase):
ELF="esp8266-nonosssdk20-iotdemo.elf"
BIN_LOAD="esp8266-nonosssdk20-iotdemo.elf-0x00000.bin"
BIN_IROM="esp8266-nonosssdk20-iotdemo.elf-0x10000.bin"
def setUp(self):
self.run_elf2image("esp8266", self.ELF, 1)
def tearDown(self):
try_delete(self.BIN_LOAD)
try_delete(self.BIN_IROM)
def test_irom_bin(self):
with open(self.ELF, "rb") as f:
e = ELFFile(f)
irom_section = e.get_section_by_name(".irom0.text")
self.assertEqual(irom_section.header.sh_size,
os.stat(self.BIN_IROM).st_size,
"IROM raw binary file should be same length as .irom0.text section")
def test_loaded_sections(self):
image = esptool.LoadFirmwareImage("esp8266", self.BIN_LOAD)
self.assertEqual(3, len(image.segments))
self.assertImageContainsSection(image, self.ELF, ".data")
self.assertImageContainsSection(image, self.ELF, ".text")
self.assertImageContainsSection(image, self.ELF, ".rodata")
class ESP8266V12SectionHeaderNotAtEnd(BaseTestCase):
""" Ref https://github.com/espressif/esptool/issues/197 -
this ELF image has the section header not at the end of the file """
ELF="esp8266-nonossdkv12-example.elf"
BIN_LOAD=ELF+"-0x00000.bin"
BIN_IROM=ELF+"-0x40000.bin"
def test_elf_section_header_not_at_end(self):
self.run_elf2image("esp8266", self.ELF)
image = esptool.LoadFirmwareImage("esp8266", self.BIN_LOAD)
self.assertEqual(3, len(image.segments))
self.assertImageContainsSection(image, self.ELF, ".data")
self.assertImageContainsSection(image, self.ELF, ".text")
self.assertImageContainsSection(image, self.ELF, ".rodata")
def tearDown(self):
try_delete(self.BIN_LOAD)
try_delete(self.BIN_IROM)
class ESP8266V2ImageTests(BaseTestCase):
def _test_elf2image(self, elfpath, binpath):
try:
self.run_elf2image("esp8266", elfpath, 2)
image = esptool.LoadFirmwareImage("esp8266", binpath)
self.assertEqual(4, len(image.segments))
self.assertImageContainsSection(image, elfpath, ".data")
self.assertImageContainsSection(image, elfpath, ".text")
self.assertImageContainsSection(image, elfpath, ".rodata")
irom_segment = image.segments[0]
self.assertEqual(0, irom_segment.addr,
"IROM segment 'load address' should be zero")
with open(elfpath, "rb") as f:
e = ELFFile(f)
sh_size = (e.get_section_by_name(".irom0.text").header.sh_size + 15) & ~15
self.assertEqual(len(irom_segment.data), sh_size, "irom segment (0x%x) should be same size (16 padded) as .irom0.text section (0x%x)" % (len(irom_segment.data), sh_size))
# check V2 CRC (for ESP8266 SDK bootloader)
with open(binpath, "rb") as f:
f.seek(-4, os.SEEK_END)
image_len = f.tell()
crc_stored = struct.unpack("<I", f.read(4))[0]
f.seek(0)
crc_calc = esptool.esp8266_crc32(f.read(image_len))
self.assertEqual(crc_stored, crc_calc)
# test imageinfo doesn't fail
self.assertImageInfo(binpath)
finally:
try_delete(binpath)
def test_nonossdkimage(self):
ELF="esp8266-nonossdkv20-at-v2.elf"
BIN="esp8266-nonossdkv20-at-v2-0x01000.bin"
self._test_elf2image(ELF, BIN)
def test_espopenrtosimage(self):
ELF="esp8266-openrtos-blink-v2.elf"
BIN="esp8266-openrtos-blink-v2-0x02000.bin"
self._test_elf2image(ELF, BIN)
class ESP32ImageTests(BaseTestCase):
def _test_elf2image(self, elfpath, binpath):
try:
self.run_elf2image("esp32", elfpath)
image = esptool.LoadFirmwareImage("esp32", binpath)
self.assertImageInfo(binpath, "esp32")
return image
finally:
try_delete(binpath)
def test_bootloader(self):
ELF="esp32-bootloader.elf"
BIN="esp32-bootloader.bin"
image = self._test_elf2image(ELF, BIN)
self.assertEqual(3, len(image.segments))
for section in [ ".iram1.text", ".iram_pool_1.text",
".dram0.rodata"]:
self.assertImageContainsSection(image, ELF, section)
def test_app_template(self):
ELF="esp32-app-template.elf"
BIN="esp32-app-template.bin"
image = self._test_elf2image(ELF, BIN)
self.assertEqual(6, len(image.segments))
# the other segment is a padding segment
for section in [ ".iram0.text", ".iram0.vectors",
".dram0.data", ".flash.rodata",
".flash.text" ]:
self.assertImageContainsSection(image, ELF, section)
def test_too_many_sections(self):
ELF="esp32-too-many-sections.elf"
BIN="esp32-too-many-sections.bin"
with self.assertRaises(subprocess.CalledProcessError) as e:
self._test_elf2image(ELF, BIN)
output = e.exception.output
self.assertIn(b"max 16", output)
self.assertIn(b"linker script", output)
class ESP8266FlashHeaderTests(BaseTestCase):
def test_2mb(self):
ELF="esp8266-nonossdkv20-at-v2.elf"
BIN="esp8266-nonossdkv20-at-v2-0x01000.bin"
try:
self.run_elf2image("esp8266", ELF, version=2, extra_args=["--flash_size", "2MB", "--flash_mode", "dio"])
with open(BIN, "rb") as f:
header = f.read(4)
print("header %r" % header)
self.assertEqualHex(0xea, header[0])
self.assertEqualHex(0x02, header[2])
self.assertEqualHex(0x30, header[3])
finally:
try_delete(BIN)
class ESP32FlashHeaderTests(BaseTestCase):
def test_16mb(self):
ELF="esp32-app-template.elf"
BIN="esp32-app-template.bin"
try:
self.run_elf2image("esp32", ELF, extra_args=["--flash_size", "16MB", "--flash_mode", "dio"])
with open(BIN, "rb") as f:
header = f.read(4)
self.assertEqualHex(0xe9, header[0])
self.assertEqualHex(0x02, header[2])
self.assertEqualHex(0x40, header[3])
finally:
try_delete(BIN)
class ELFSHA256Tests(BaseTestCase):
ELF = "esp32-app-template.elf"
SHA_OFFS = 0xb0 # absolute offset of the SHA in the .bin file
BIN = "esp32-app-template.bin"
def test_binary_patched(self):
self.run_elf2image("esp32", self.ELF, extra_args=["--elf-sha256-offset", "0x%x" % self.SHA_OFFS])
image = esptool.LoadFirmwareImage("esp32", self.BIN)
rodata_segment = image.segments[0]
observed_sha256 = rodata_segment.data[self.SHA_OFFS-0x20:self.SHA_OFFS-0x20+32] # subtract 0x20 byte header here
sha256 = hashlib.sha256()
with open(self.ELF, "rb") as f:
expected_sha256 = hashlib.sha256(f.read()).digest()
self.assertSequenceEqual(expected_sha256, observed_sha256)
def test_no_overwrite_data(self):
with self.assertRaises(subprocess.CalledProcessError) as e:
self.run_elf2image("esp32", "esp32-bootloader.elf", extra_args=["--elf-sha256-offset", "0xb0"])
output = e.exception.output
self.assertIn(b"SHA256", output)
self.assertIn(b"zero", output)
if __name__ == '__main__':
print("Running image generation tests...")
unittest.main(buffer=True)
|
FSX/misaka
|
refs/heads/master
|
tests/test_xss_protection.py
|
3
|
# -*- coding: utf-8 -*-
from chibitest import TestCase, ok
from misaka import escape_html, Markdown, SaferHtmlRenderer
class EscapeHtmlTest(TestCase):
def test_escape_html(self):
ok(escape_html('a&<>"\'/')) == 'a&amp;&lt;&gt;&quot;&#39;/'
def test_escape_html_slash(self):
ok(escape_html('a&<>"\'/', True)) == 'a&amp;&lt;&gt;&quot;&#39;&#47;'
render = Markdown(SaferHtmlRenderer())
render_escape = Markdown(SaferHtmlRenderer(sanitization_mode='escape'))
renderer_rewrite = SaferHtmlRenderer(
link_rewrite='//example.com/redirect/{url}',
img_src_rewrite='//img_proxy/{url}',
)
render_rewrite = Markdown(renderer_rewrite)
rewrite_url = renderer_rewrite.rewrite_url
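# Note added for clarity (not in the original test): rewrite_url percent-encodes
# the target URL and substitutes it into the '{url}' placeholder of the rewrite
# template, e.g. an image whose source is 'http:"foo"' is expected by the
# rewriting tests below to come out as src="//img_proxy/http%3A%22foo%22".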
class SaferHtmlRendererTest(TestCase):
def test_html_skip(self):
actual = render('Example <script>alert(1);</script>')
expected = '<p>Example alert(1);</p>\n'
ok(actual).diff(expected)
html = render('<sc<script>ript>xss</sc</script>ript>')
ok(html).not_contains('<sc')
ok(html).not_contains('ript>')
actual = render('<span><a href="javascript:xss">foo</a></span>')
expected = '<p>foo</p>\n'
ok(actual).diff(expected)
def test_html_escape(self):
supplied = 'Example <script>alert(1);</script>'
expected = '<p>%s</p>\n' % escape_html(supplied)
ok(render_escape(supplied)).diff(expected)
html = render_escape('<sc<script>ript>xss</sc</script>ript>')
ok(html).not_contains('<sc')
ok(html).not_contains('ript>')
supplied = '<span><a href="javascript:xss">foo</a></span>'
expected = '<p>%s</p>\n' % escape_html(supplied)
ok(render_escape(supplied)).diff(expected)
def test_autolink_filtering_with_nice_data(self):
for url in ('http://a', "https://b?x&y"):
actual = render('<%s>' % url)
expected = '<p><a href="{0}">{0}</a></p>\n'.format(escape_html(url))
ok(actual).diff(expected)
supplied = "<alice@example.net>"
expected = '<p>%s</p>\n' % escape_html(supplied)
ok(render_escape(supplied)).diff(expected)
def test_autolink_filtering_with_naughty_data(self):
actual = render('<javascript:foo>')
expected = '<p>&lt;javascript:foo&gt;</p>\n'
ok(actual).diff(expected)
url = 'javascript:0'
encoded_url = ''.join('&#x{0:x};'.format(ord(c)) for c in url)
html = render('<%s>' % encoded_url)
ok(html).not_contains(url)
def test_link_filtering_with_nice_data(self):
for url in ('http://a', 'https://b'):
actual = render("['foo](%s \"bar'\")" % url)
expected = '<p><a href="{0}" title="bar&#39;">&#39;foo</a></p>\n'.format(url)
ok(actual).diff(expected)
def test_link_filtering_with_naughty_data(self):
supplied = '[foo](javascript:xss)'
expected = '<p>%s</p>\n' % escape_html(supplied)
ok(render(supplied)).diff(expected)
html = render('[foo](unknown:bar)')
expected = '<p>%s</p>\n' % escape_html(supplied)
ok(render(supplied)).diff(expected)
html = render('[" xss><xss>]("><xss>)')
ok(html).not_contains('<xss>')
ok(html).not_contains('" xss')
html = render('[" xss><xss>](https:"><xss>)')
ok(html).not_contains('<xss>')
ok(html).not_contains('" xss')
def test_image_src_filtering_with_nice_data(self):
actual = render('![](http:"foo")')
expected = '<p><img src="http:&quot;foo&quot;" /></p>\n'
ok(actual).diff(expected)
actual = render('!["bar"](https://example.org/ "\'title\'")')
expected = '<p><img src="https://example.org/" alt="&quot;bar&quot;" title="&#39;title&#39;" /></p>\n'
ok(actual).diff(expected)
def test_image_src_filtering_with_naughty_data(self):
actual = render('![foo](javascript:xss)')
expected = '<p>![foo](javascript:xss)</p>\n'
ok(actual).diff(expected)
def test_autolink_rewriting(self):
for url in ('http://a', 'https://b?x&y'):
actual = render_rewrite('<%s>' % url)
expected = '<p><a href="%s">%s</a></p>\n'
expected %= (rewrite_url(url), escape_html(url))
ok(actual).diff(expected)
supplied = "<alice@example.net>"
expected = '<p>%s</p>\n' % escape_html(supplied)
ok(render_escape(supplied)).diff(expected)
def test_link_rewriting(self):
for url in ('http://a', 'https://b'):
actual = render_rewrite("['foo](%s \"bar'\")" % url)
expected = '<p><a href="%s" title="bar&#39;">&#39;foo</a></p>\n' % rewrite_url(url)
ok(actual).diff(expected)
def test_image_src_rewriting(self):
actual = render_rewrite('![](http:"foo")')
expected = '<p><img src="//img_proxy/http%3A%22foo%22" /></p>\n'
ok(actual).diff(expected)
def test_bug_pyformat_in_content(self):
# See: https://github.com/FSX/misaka/issues/66
actual = render('[![xxx](http://www.example.com/?v=%s)](http://www.example.com/])')
expected = '<p><a href="http://www.example.com/]"><img src="http://www.example.com/?v=%s" alt="xxx" /></a></p>\n'
ok(actual).diff(expected)
|
danmergens/mi-instrument
|
refs/heads/master
|
mi/dataset/parser/test/test_velpt_ab.py
|
7
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_velpt_ab
@file mi-dataset/mi/dataset/parser/test/test_velpt_ab_dcl.py
@author Chris Goodrich
@brief Test code for the velpt_ab parser
"""
__author__ = 'Chris Goodrich'
import os
import re
from nose.plugins.attrib import attr
from mi.core.exceptions import ConfigurationException
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.velpt_ab.resource import RESOURCE_PATH
from mi.dataset.parser.common_regexes import FLOAT_REGEX, END_OF_LINE_REGEX
from mi.dataset.parser.velpt_ab import VelptAbParser, VelptAbParticleClassKey
from mi.dataset.parser.velpt_ab_particles import VelptAbInstrumentDataParticle,\
VelptAbDiagnosticsHeaderParticle, VelptAbDiagnosticsDataParticle, VelptAbInstrumentMetadataParticle
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.logging import log
@attr('UNIT', group='mi')
class VelptAbParserUnitTestCase(ParserUnitTestCase):
"""
velpt_ab_dcl Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self._parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
VelptAbParticleClassKey.METADATA_PARTICLE_CLASS: VelptAbDiagnosticsHeaderParticle,
VelptAbParticleClassKey.DIAGNOSTICS_PARTICLE_CLASS: VelptAbDiagnosticsDataParticle,
VelptAbParticleClassKey.INSTRUMENT_METADATA_PARTICLE_CLASS: VelptAbInstrumentMetadataParticle,
VelptAbParticleClassKey.INSTRUMENT_PARTICLE_CLASS: VelptAbInstrumentDataParticle
}
}
self._incomplete_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_dcl_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
self._bad_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {}
}
def test_simple(self):
"""
Read files and verify that all expected particles can be read.
Verify that the contents of the particles are correct.
This is the happy path test.
"""
log.debug('===== START TEST SIMPLE =====')
# Test the telemetered version
with open(os.path.join(RESOURCE_PATH, 'VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST SIMPLE =====')
def test_jumbled(self):
"""
Read files and verify that all expected particles can be read.
This particular data file has the velocity data records
preceded by the diagnostics records, a situation not likely
to occur on a deployed instrument but anything is possible!
The logic in the parser will not produce an instrument metadata
particle (configuration data) until it encounters a velocity or
a diagnostics record. Assumes that all the configuration records are
at the beginning of the file. This is reasonable as the instrument is
configured before being deployed. So the config records would be stored
first. Verify that the contents of the particles are correct.
There should be no exceptions generated.
"""
log.debug('===== START TEST SIMPLE NOT IN ORDER =====')
# Test the telemetered version
with open(os.path.join(RESOURCE_PATH, 'jumbled_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'jumbled_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST SIMPLE NOT IN ORDER =====')
def test_too_few_diagnostics_records(self):
"""
The file used in this test has only 19 diagnostics records in the second set.
Twenty are expected. The records are all still processed.
The error is simply noted.
"""
log.debug('===== START TEST NOT ENOUGH DIAGNOSTICS RECORDS =====')
with open(os.path.join(RESOURCE_PATH, 'too_few_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'too_few_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST NOT ENOUGH DIAGNOSTICS RECORDS =====')
def test_too_many_diagnostics_records(self):
"""
The file used in this test has 21 diagnostics records in the second set.
Twenty are expected. The records are all still processed.
The error is simply noted.
"""
log.debug('===== START TEST TOO MANY DIAGNOSTICS RECORDS =====')
with open(os.path.join(RESOURCE_PATH, 'too_many_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 73
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'too_many_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST TOO MANY DIAGNOSTICS RECORDS =====')
def test_invalid_sync_byte(self):
"""
The file used in this test has extra bytes between records which need to be skipped
in order to process the correct number of particles. All records are still processed.
"""
log.debug('===== START TEST INVALID SYNC BYTE =====')
with open(os.path.join(RESOURCE_PATH, 'extra_bytes_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST INVALID SYNC BYTE =====')
def test_invalid_record_id(self):
"""
The file used in this test has one record with an invalid ID byte.
This results in 71 particles being retrieved instead of 72.
"""
log.debug('===== START TEST INVALID RECORD ID =====')
with open(os.path.join(RESOURCE_PATH, 'bad_id_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'bad_id_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST INVALID RECORD ID =====')
def test_truncated_file(self):
"""
The file used in this test has a malformed (too short) record at
the end of the file. This results in 71 particles being retrieved
instead of 72.
"""
log.debug('===== START TEST FOUND TRUNCATED FILE =====')
with open(os.path.join(RESOURCE_PATH, 'truncated_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'truncated_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST FOUND TRUNCATED FILE =====')
def test_bad_velocity_checksum(self):
"""
The file used in this test has a record with a bad checksum.
This results in 71 particles being retrieved instead of 72.
"""
log.debug('===== START TEST FOUND BAD VELOCITY CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_velocity_checksum_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as \
file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'bad_velocity_checksum_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST FOUND BAD VELOCITY CHECKSUM =====')
def test_bad_diagnostic_checksum(self):
"""
The file used in this test has a record with a bad checksum.
This results in 71 particles being retrieved instead of 72.
"""
log.debug('===== START TEST FOUND BAD DIAGNOSTICS CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_diag_checksum_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'too_few_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST FOUND BAD DIAGNOSTICS CHECKSUM =====')
def test_missing_hardware_config(self):
"""
The file used in this test has no hardware configuration record.
Instrument metadata will still be produced but the fields from
the hardware config will NOT be included.
"""
log.debug('===== START TEST MISSING HARDWARE CONFIG =====')
with open(os.path.join(RESOURCE_PATH, 'no_hardware_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_hardware_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING HARDWARE CONFIG =====')
def test_missing_head_config(self):
"""
The file used in this test has no head configuration record.
Instrument metadata will still be produced but the fields from
the head config will NOT be included.
"""
log.debug('===== START TEST MISSING HEAD CONFIG =====')
with open(os.path.join(RESOURCE_PATH, 'no_head_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_head_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING HEAD CONFIG =====')
def test_missing_user_config(self):
"""
The file used in this test has no user configuration record.
Instrument metadata will still be produced but the fields from
the user config will NOT be included.
"""
log.debug('===== START TEST MISSING USER CONFIG =====')
with open(os.path.join(RESOURCE_PATH, 'no_user_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_user_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING USER CONFIG =====')
def test_missing_all_config(self):
"""
The file used in this test has no configuration records at all.
Instrument metadata will still be produced but the fields from
the hardware, head and user configs will NOT be included.
"""
log.debug('===== START TEST MISSING ALL CONFIG RECORDS =====')
with open(os.path.join(RESOURCE_PATH, 'no_config_recs_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_config_recs_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING ALL CONFIG RECORDS =====')
def test_head_config_bad_checksum(self):
"""
The file used in this test has a bad checksum in the head configuration record.
Instrument metadata will still be produced but the fields from
the head config will NOT be included.
"""
log.debug('===== START TEST HEAD CONFIG BAD CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_checksum_in_head_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_head_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST HEAD CONFIG BAD CHECKSUM =====')
def test_hardware_config_bad_checksum(self):
"""
The file used in this test has a bad checksum in the hardware configuration record.
Instrument metadata will still be produced but the fields from
the hardware config will NOT be included.
"""
log.debug('===== START TEST HARDWARE CONFIG BAD CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_checksum_in_hardware_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_hardware_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST HARDWARE CONFIG BAD CHECKSUM =====')
def test_user_config_bad_checksum(self):
"""
The file used in this test has a bad checksum in the head configuration record.
Instrument metadata will still be produced but the fields from
the head config will NOT be included.
"""
log.debug('===== START TEST USER CONFIG BAD CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_checksum_in_user_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_user_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST USER CONFIG BAD CHECKSUM =====')
def test_diag_header_bad_checksum(self):
"""
The file used in this test has a diagnostics header record with a bad checksum.
No diagnostics header particle is produced for that group, so 71 particles
are retrieved instead of 72.
"""
log.debug('===== START TEST DIAGNOSTICS HEADER BAD CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_checksum_in_diag_header_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_diag_header_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST DIAGNOSTICS HEADER BAD CHECKSUM =====')
def test_missing_diag_header(self):
"""
The file used in this test is missing a diagnostics header record.
No diagnostics header particle can be produced for that group, so 71
particles are retrieved instead of 72.
"""
log.debug('===== START TEST MISSING DIAGNOSTICS HEADER =====')
with open(os.path.join(RESOURCE_PATH, 'no_diag_header_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_diag_header_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING DIAGNOSTICS HEADER =====')
def test_random_diag_record(self):
"""
The file used in this test has a stray diagnostics record in an unexpected
location in the file. All records are still processed and the expected 72
particles are retrieved.
"""
log.debug('===== START TEST RANDOM DIAGNOSTIC RECORD FOUND =====')
with open(os.path.join(RESOURCE_PATH, 'random_diag_record_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'random_diag_record_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST RANDOM DIAGNOSTIC RECORD FOUND =====')
def test_no_diag_recs(self):
"""
The file used in this test has a single diagnostic header record but no diagnostic
records. No diagnostic particles will be produced.
"""
log.debug('===== START TEST NO DIAGNOSTIC RECORDS FOUND =====')
with open(os.path.join(RESOURCE_PATH, 'no_diag_recs_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 51
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_diag_recs_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST NO DIAGNOSTIC RECORDS FOUND =====')
def test_bad_configuration(self):
"""
Attempt to build a parser with a bad configuration.
"""
log.debug('===== START TEST BAD CONFIGURATION =====')
with open(os.path.join(RESOURCE_PATH, 'VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
with self.assertRaises(ConfigurationException):
parser = VelptAbParser(self._bad_parser_config,
file_handle,
self.exception_callback)
log.debug('===== END TEST BAD CONFIGURATION =====')
def test_partial_configuration(self):
"""
Attempt to build a parser with a bad configuration.
"""
log.debug('===== START TEST PARTIAL CONFIGURATION =====')
with open(os.path.join(RESOURCE_PATH, 'VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
with self.assertRaises(ConfigurationException):
parser = VelptAbParser(self._incomplete_parser_config,
file_handle,
self.exception_callback)
log.debug('===== END TEST PARTIAL CONFIGURATION =====')
def test_bad_diag_checksum_19_recs(self):
"""
The file used in this test has a diagnostics header record with a bad
checksum and only 19 diagnostics records in the affected group (20 are
expected). The records are still processed and 116 particles are retrieved.
"""
log.debug('===== START TEST FOUND BAD DIAG HDR CHECKSUM AND TOO FEW RECS =====')
with open(os.path.join(RESOURCE_PATH, 'bad_diag_hdr_checksum_19_diag_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 116
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'bad_diag_hdr_checksum_19_diag_VELPT_SN_11402_2014-07-02.yml',
RESOURCE_PATH)
log.debug('===== END TEST FOUND BAD DIAG HDR CHECKSUM AND TOO FEW RECS =====')
def test_bad_diag_checksum_21_recs(self):
"""
The file used in this test has a diagnostics header record with a bad
checksum and 21 diagnostics records in the affected group (20 are
expected). The records are still processed and 118 particles are retrieved.
"""
log.debug('===== START TEST FOUND BAD DIAG HDR CHECKSUM AND TOO MANY RECS =====')
with open(os.path.join(RESOURCE_PATH, 'bad_diag_hdr_checksum_21_diag_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 118
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'bad_diag_hdr_checksum_21_diag_VELPT_SN_11402_2014-07-02.yml',
RESOURCE_PATH)
log.debug('===== END TEST FOUND BAD DIAG HDR CHECKSUM AND TOO MANY RECS =====')
def fix_yml_pressure_params(self):
"""
This helper tool was used to modify the yml files in response to ticket #4341
"""
pressure_regex = r' pressure:\s+(0.\d+)'
for file_name in os.listdir(RESOURCE_PATH):
if file_name.endswith('.yml'):
with open(os.path.join(RESOURCE_PATH, file_name), 'rU') as in_file_id:
out_file_name = file_name + '.new'
log.info('fixing file %s', file_name)
log.info('creating file %s', out_file_name)
out_file_id = open(os.path.join(RESOURCE_PATH, out_file_name), 'w')
for line in in_file_id:
match = re.match(pressure_regex, line)
if match is not None:
new_value = float(match.group(1)) * 1000.0
new_line = ' pressure_mbar: ' + str(new_value)
out_file_id.write(new_line + '\n')
else:
out_file_id.write(line)
out_file_id.close()
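# For example (illustrative only): a yml line reading 'pressure: 0.701' would
# be rewritten by the helper above as 'pressure_mbar: 701.0', scaling the
# matched value by 1000.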
def fix_yml_float_params(self):
"""
This helper tool was used to modify the yml files in response to ticket #8564
"""
param_change_table = [
('battery_voltage', 'battery_voltage_dV', 10),
('sound_speed_analog2', 'sound_speed_dms', 10),
('heading', 'heading_decidegree', 10),
('pitch', 'pitch_decidegree', 10),
('roll', 'roll_decidegree', 10),
('pressure_mbar', 'pressure_mbar', 1),
('temperature', 'temperature_centidegree', 100),
('velocity_beam1', 'velocity_beam1', 1),
('velocity_beam2', 'velocity_beam2', 1),
('velocity_beam3', 'velocity_beam3', 1)
]
for file_name in os.listdir(RESOURCE_PATH):
if file_name.endswith('.yml'):
with open(os.path.join(RESOURCE_PATH, file_name), 'rU') as in_file_id:
out_file_name = file_name + '.new'
log.info('fixing file %s', file_name)
log.info('creating file %s', out_file_name)
out_file_id = open(os.path.join(RESOURCE_PATH, out_file_name), 'w')
for line in in_file_id:
new_line = line
for param_name, new_name, mult in param_change_table:
param_regex = r'\s+' + param_name + r':\s+(' + FLOAT_REGEX + ')' + END_OF_LINE_REGEX
match = re.match(param_regex, line)
if match is not None:
new_value = int(float(match.group(1)) * mult)
new_line = ' ' + new_name + ': ' + str(new_value) + '\n'
log.info('%s', new_line)
out_file_id.write(new_line)
out_file_id.close()
|
Stanford-Online/edx-platform
|
refs/heads/master
|
openedx/stanford/djangoapps/auth_lagunita/__init__.py
|
1
|
# -*- coding: utf-8 -*-
"""
Record extra lagunita-centric user data
"""
from __future__ import unicode_literals
|
clarammdantas/Online-Jugdes-Problems
|
refs/heads/master
|
online_judge_solutions/uri-1923.py
|
2
|
# URI 1923 - Rerisson and Barbecue
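# Approach (comments added for clarity): build an undirected friendship graph,
# then breadth-first search from "Rerisson" for at most g levels. Everyone
# reached within those levels is invited; the count is printed first, followed
# by the invitees in sorted order.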
n, g = map(int, raw_input().split())
friends = {}
for i in range(n):
p1, p2 = raw_input().split()
if friends.get(p1) == None:
friends[p1] = [p2]
else:
friends[p1].append(p2)
if friends.get(p2) == None:
friends[p2] = [p1]
else:
friends[p2].append(p1)
level = {"Rerisson" : 0}
ans = []
i = 1
frontier = ["Rerisson"]
invited = 0
while i <= g and frontier:
next = []
for u in frontier:
for v in friends[u]:
if level.get(v) == None:
ans.append(v)
invited += 1
level[v] = i
next.append(v)
frontier = next
i += 1
print invited
invited_people = sorted(ans)
for i in range(invited):
print invited_people[i]
|
archf/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_monitor_tcp.py
|
16
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_monitor_tcp
short_description: Manages F5 BIG-IP LTM tcp monitors.
description: Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API.
version_added: "1.4"
options:
name:
description:
- Monitor name.
required: True
aliases:
- monitor
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp)
parent on the C(Common) partition.
default: "/Common/tcp"
send:
description:
- The send string for the monitor call.
receive:
description:
- The receive string for the monitor call.
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, and the C(type) is C(tcp) (the default),
then a C(port) number must be specified.
type:
description:
- The template type of this monitor template.
- Deprecated in 2.4. Use one of the C(bigip_monitor_tcp_echo) or
C(bigip_monitor_tcp_half_open) modules instead.
default: 'tcp'
choices:
- tcp
- tcp_echo
- tcp_half_open
- TTYPE_TCP
- TTYPE_TCP_ECHO
- TTYPE_TCP_HALF_OPEN
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
must be specified
- This argument is not supported for TCP Echo types.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. If this parameter is not
provided when creating a new monitor, then the default value will be 16.
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. If this parameter is not provided when creating
a new monitor, then the default value will be 0.
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires BIG-IP software version >= 12
requirements:
- f5-sdk >= 2.2.3
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create TCP Monitor
bigip_monitor_tcp:
state: "present"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
name: "my_tcp_monitor"
type: "tcp"
send: "tcp string to send"
receive: "tcp string to receive"
delegate_to: localhost
- name: Remove TCP Monitor
bigip_monitor_tcp:
state: "absent"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
name: "my_tcp_monitor"
delegate_to: localhost
'''
RETURN = '''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: "tcp"
send:
description: The new send string for this monitor.
returned: changed
type: string
sample: "tcp string to send"
receive:
description: The new receive string for this monitor.
returned: changed
type: string
sample: "tcp string to receive"
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: "10.12.13.14"
port:
description: The new port of IP/port definition.
returned: changed
type: string
sample: "80"
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
time_until_up:
description: The new time in which to mark a system as up after first successful response.
returned: changed
type: int
sample: 2
'''
import os
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
from ansible.module_utils.f5_utils import iteritems
from ansible.module_utils.f5_utils import defaultdict
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
def __init__(self, params=None):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
if params:
self.update(params=params)
def update(self, params=None):
if params:
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have
# an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
except Exception:
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def interval(self):
if self._values['interval'] is None:
return None
if int(self._values['interval']) < 1 or int(self._values['interval']) > 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
try:
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
result = str(netaddr.IPAddress(self._values['ip']))
return result
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def time_until_up(self):
if self._values['time_until_up'] is None:
return None
return int(self._values['time_until_up'])
@property
def parent(self):
if self._values['parent_partition']:
if self._values['parent_partition'] is None:
return None
if self._values['parent_partition'].startswith('/'):
partition = os.path.basename(self._values['parent_partition'])
result = '/{0}/{1}'.format(partition, self.type)
else:
result = '/{0}/{1}'.format(self.parent_partition, self.type)
else:
if self._values['parent'] is None:
return None
if self._values['parent'].startswith('/'):
parent = os.path.basename(self._values['parent'])
result = '/{0}/{1}'.format(self.partition, parent)
else:
result = '/{0}/{1}'.format(self.partition, self._values['parent'])
return result
@property
def parent_partition(self):
if self._values['parent_partition'] is None:
return None
self._values['__warnings'].append(
dict(
msg="The parent_partition param is deprecated",
version='2.4'
)
)
return self._values['parent_partition']
class ParametersTcp(Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent',
'recv': 'receive'
}
api_attributes = [
'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'recv', 'send',
'destination'
]
returnables = [
'parent', 'send', 'receive', 'ip', 'port', 'interval', 'timeout',
'time_until_up'
]
updatables = [
'destination', 'send', 'receive', 'interval', 'timeout', 'time_until_up'
]
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
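# Example (illustrative): assigning destination = '10.10.10.10:80' stores
# ip '10.10.10.10' and port '80'; reading destination back re-composes the
# same '10.10.10.10:80' string from the two halves.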
@property
def type(self):
return 'tcp'
@type.setter
def type(self, value):
if value:
self._values['__warnings'].append(
dict(
msg="The type param is deprecated",
version='2.4'
)
)
class ParametersEcho(Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent',
'destination': 'ip'
}
api_attributes = [
'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'destination'
]
returnables = [
'parent', 'ip', 'interval', 'timeout', 'time_until_up'
]
updatables = [
'destination', 'interval', 'timeout', 'time_until_up'
]
@property
def type(self):
return 'tcp_echo'
@type.setter
def type(self, value):
if value:
self._values['__warnings'].append(
dict(
msg="The type param is deprecated",
version='2.4'
)
)
@property
def destination(self):
return self.ip
@destination.setter
def destination(self, value):
self._values['ip'] = value
@property
def send(self):
if self._values['send'] is None:
return None
raise F5ModuleError(
"The 'send' parameter is not available for TCP echo"
)
@property
def receive(self):
if self._values['receive'] is None:
return None
raise F5ModuleError(
"The 'receive' parameter is not available for TCP echo"
)
@property
def port(self):
return None
class ParametersHalfOpen(Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent'
}
api_attributes = [
'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'destination'
]
returnables = [
'parent', 'ip', 'port', 'interval', 'timeout', 'time_until_up'
]
updatables = [
'destination', 'interval', 'timeout', 'time_until_up'
]
@property
def destination(self):
if self.ip is None and self.port is None:
return None
result = '{0}:{1}'.format(self.ip, self.port)
return result
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def type(self):
return 'tcp_half_open'
@type.setter
def type(self, value):
if value:
self._values['__warnings'].append(
dict(
msg="The type param is deprecated",
version='2.4'
)
)
@property
def send(self):
if self._values['send'] is None:
return None
raise F5ModuleError(
"The 'send' parameter is not available for TCP half open"
)
@property
def receive(self):
if self._values['receive'] is None:
return None
raise F5ModuleError(
"The 'receive' parameter is not available for TCP half open"
)
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
        if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.type == 'tcp_echo':
if self.want.ip is None:
return None
else:
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
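# Illustrative example (not part of the original module): Difference only
# reports attributes whose desired value differs from the current one, and the
# `interval` property above additionally requires interval < timeout. A rough
# standalone analogue of that validation rule:
def _example_validate_interval(interval, timeout):
    if interval is not None and timeout is not None and interval >= timeout:
        raise ValueError("Parameter 'interval' must be less than 'timeout'.")
    return interval
# _example_validate_interval(5, 16)   -> 5
# _example_validate_interval(20, 16)  -> raises ValueError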
# TODO: Remove all of this in 2.5
class ModuleManager(object):
def __init__(self, client):
self.client = client
def exec_module(self):
type = self.client.module.params.get('type', 'tcp')
manager = self.get_manager(type)
return manager.exec_module()
def get_manager(self, type):
if type in [None, 'tcp', 'TTYPE_TCP']:
return TcpManager(self.client)
elif type in ['tcp_echo', 'TTYPE_TCP_ECHO']:
return TcpEchoManager(self.client)
elif type in ['tcp_half_open', 'TTYPE_TCP_HALF_OPEN']:
return TcpHalfOpenManager(self.client)
class BaseManager(object):
def _announce_deprecations(self):
warnings = []
if self.want:
warnings += self.want._values.get('__warnings', [])
if self.have:
warnings += self.have._values.get('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations()
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
self._set_default_creation_values()
if self.client.check_mode:
return True
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the monitor.")
return True
class TcpManager(BaseManager):
def __init__(self, client):
self.client = client
self.have = None
self.want = ParametersTcp(self.client.module.params)
self.changes = ParametersTcp()
def _set_changed_options(self):
changed = {}
for key in ParametersTcp.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = ParametersTcp(changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = ParametersTcp.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = ParametersTcp(changed)
return True
return False
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
def read_current_from_device(self):
resource = self.client.api.tm.ltm.monitor.tcps.tcp.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return ParametersTcp(result)
def exists(self):
result = self.client.api.tm.ltm.monitor.tcps.tcp.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.changes.api_params()
result = self.client.api.tm.ltm.monitor.tcps.tcp.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.monitor.tcps.tcp.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
result = self.client.api.tm.ltm.monitor.tcps.tcp.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
# TODO: Remove this in 2.5 and put it in its own module
class TcpEchoManager(BaseManager):
def __init__(self, client):
self.client = client
self.have = None
self.want = ParametersEcho(self.client.module.params)
self.changes = ParametersEcho()
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
def _set_changed_options(self):
changed = {}
for key in ParametersEcho.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = ParametersEcho(changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = ParametersEcho.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = ParametersEcho(changed)
return True
return False
def read_current_from_device(self):
resource = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return ParametersEcho(result)
def exists(self):
result = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
result = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
# TODO: Remove this in 2.5 and put it in its own module
class TcpHalfOpenManager(BaseManager):
def __init__(self, client):
self.client = client
self.have = None
self.want = ParametersHalfOpen(self.client.module.params)
self.changes = ParametersHalfOpen()
def _set_changed_options(self):
changed = {}
for key in ParametersHalfOpen.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = ParametersHalfOpen(changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = ParametersHalfOpen.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = ParametersHalfOpen(changed)
return True
return False
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
def read_current_from_device(self):
resource = self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return ParametersHalfOpen(result)
def exists(self):
result = self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
result = self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(required=True),
# Make this assume "tcp" in the partition specified. The user
# is required to specify the full path if they want to use a different
# partition.
parent=dict(),
send=dict(),
receive=dict(),
ip=dict(),
port=dict(type='int'),
interval=dict(type='int'),
timeout=dict(type='int'),
time_until_up=dict(type='int'),
# Deprecated params
type=dict(
removed_in_version='2.4',
choices=[
'tcp', 'TTYPE_TCP', 'TTYPE_TCP_ECHO', 'TTYPE_TCP_HALF_OPEN'
]
),
parent_partition=dict(
removed_in_version='2.4'
)
)
self.f5_product_name = 'bigip'
self.mutually_exclusive = [
['parent', 'parent_partition']
]
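# Hypothetical playbook usage (a sketch only; parameter names are taken from
# the argument_spec above, while the module name and the F5 connection
# arguments server/user/password are assumptions from the common F5 spec):
#
#   - bigip_monitor_tcp:
#       name: my_tcp_monitor
#       ip: 10.10.10.10
#       port: 80
#       interval: 5
#       timeout: 16
#       server: lb.example.com
#       user: admin
#       password: secret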
def main():
try:
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name,
mutually_exclusive=spec.mutually_exclusive
)
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
if not HAS_NETADDR:
raise F5ModuleError("The python netaddr module is required")
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
SteveXiSong/UW-Madison-ECE757-S15-MulticastSnooping
|
refs/heads/master
|
src/arch/x86/isa/insts/general_purpose/control_transfer/jump.py
|
40
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop JMP_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2
};
def macroop JMP_R
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
wripi reg, 0
};
def macroop JMP_M
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
ld t1, seg, sib, disp
wripi t1, 0
};
def macroop JMP_P
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t7
ld t1, seg, riprel, disp
wripi t1, 0
};
def macroop JMP_FAR_M
{
limm t1, 0, dataSize=8
limm t2, 0, dataSize=8
lea t1, seg, sib, disp, dataSize=asz
ld t2, seg, [1, t0, t1], dsz
ld t1, seg, [1, t0, t1]
br rom_label("jmpFarWork")
};
def macroop JMP_FAR_P
{
limm t1, 0, dataSize=8
limm t2, 0, dataSize=8
rdip t7, dataSize=asz
lea t1, seg, riprel, disp, dataSize=asz
ld t2, seg, [1, t0, t1], dsz
ld t1, seg, [1, t0, t1]
br rom_label("jmpFarWork")
};
def macroop JMP_FAR_I
{
# Put the whole far pointer into a register.
limm t2, imm, dataSize=8
# Figure out the width of the offset.
limm t3, dsz, dataSize=8
slli t3, t3, 3, dataSize=8
# Get the offset into t1.
mov t1, t0, t2
# Get the selector into t2.
srl t2, t2, t3, dataSize=8
mov t2, t0, t2, dataSize=2
br rom_label("jmpFarWork")
};
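# Worked example (illustrative, not from the original source): with a 16-bit
# operand size the far immediate 0x12345678 splits into offset = 0x5678 (the
# low dsz*8 = 16 bits kept by the first mov above) and selector = 0x1234 (the
# bits shifted down by t3 and truncated to two bytes), which is exactly the
# decomposition JMP_FAR_I performs before branching to jmpFarWork.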
def rom
{
extern jmpFarWork:
# t1 has the offset and t2 has the new selector.
# This is intended to run in protected mode.
andi t0, t2, 0xFC, flags=(EZF,), dataSize=2
fault "new GeneralProtection(0)", flags=(CEZF,)
andi t3, t2, 0xF8, dataSize=8
andi t0, t2, 0x4, flags=(EZF,), dataSize=2
br rom_local_label("farJmpGlobalDescriptor"), flags=(CEZF,)
ld t4, tsl, [1, t0, t3], dataSize=8, addressSize=8, atCPL0=True
br rom_local_label("farJmpProcessDescriptor")
farJmpGlobalDescriptor:
ld t4, tsg, [1, t0, t3], dataSize=8, addressSize=8, atCPL0=True
farJmpProcessDescriptor:
rcri t0, t4, 13, flags=(ECF,), dataSize=2
br rom_local_label("farJmpSystemDescriptor"), flags=(nCECF,)
chks t2, t4, CSCheck, dataSize=8
wrdl cs, t4, t2
wrsel cs, t2
wrip t0, t1
eret
farJmpSystemDescriptor:
panic "Far jumps to system descriptors aren't implemented"
eret
};
def macroop JMP_FAR_REAL_M
{
lea t1, seg, sib, disp, dataSize=asz
ld t2, seg, [1, t0, t1], dsz
ld t1, seg, [1, t0, t1]
zexti t3, t1, 15, dataSize=8
slli t3, t3, 4, dataSize=8
wrsel cs, t1, dataSize=2
wrbase cs, t3
wrip t0, t2, dataSize=asz
};
def macroop JMP_FAR_REAL_P
{
panic "Real mode far jump executed in 64 bit mode!"
};
def macroop JMP_FAR_REAL_I
{
# Put the whole far pointer into a register.
limm t2, imm, dataSize=8
# Figure out the width of the offset.
limm t3, dsz, dataSize=8
slli t3, t3, 3, dataSize=8
# Get the selector into t1.
    srl t1, t2, t3, dataSize=8
mov t1, t0, t1, dataSize=2
# And get the offset into t2
mov t2, t0, t2
    slli t3, t1, 4, dataSize=8
wrsel cs, t1, dataSize=2
wrbase cs, t3
wrip t0, t2, dataSize=asz
};
'''
|
ssteinerx/pingparser
|
refs/heads/master
|
setup.py
|
2
|
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup
try:
import configparser
except ImportError:
import ConfigParser as configparser
SETUP_ARGS = {"name" : ("metadata",),
"version" : ("metadata",),
"description" : ("metadata", "summary"),
"author" : ("metadata",),
"author_email": ("metadata",),
"keywords" : ("metadata",),
"url" : ("metadata", "home_page"),
"license" : ("metadata",),
"py_modules" : ("files", "modules"),
"requires" : ("metadata", "requires_dist"),
"classifiers" : ("metadata", "classifier"),
}
MULTI = ("classifiers",
"requires",
"py_modules",
)
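# Hypothetical setup.cfg snippet (illustrative only) that the mapping above
# would pick up: single-value options come straight from their section, while
# options listed in MULTI are split on newlines.
#
#   [metadata]
#   name = pingparser
#   version = 0.1
#   classifier =
#       Programming Language :: Python
#
#   [files]
#   modules = pingparser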
def generate_setuptools_kwargs_from_setup_cfg():
config = configparser.RawConfigParser()
config.read('setup.cfg')
kwargs = {}
for arg in SETUP_ARGS:
if len(SETUP_ARGS[arg]) == 2:
section, option = SETUP_ARGS[arg]
elif len(SETUP_ARGS[arg]) == 1:
section = SETUP_ARGS[arg][0]
option = arg
try:
in_cfg_value = config.get(section, option)
except configparser.NoOptionError:
# There is no such option in the setup.cfg
continue
if arg in MULTI:
# Special behaviour when we have a multi line option
if "\n" in in_cfg_value:
in_cfg_value = in_cfg_value.strip().split('\n')
else:
in_cfg_value = list((in_cfg_value,))
kwargs[arg] = in_cfg_value
return kwargs
kwargs = generate_setuptools_kwargs_from_setup_cfg()
kwargs['long_description'] = open('README.rst', 'rt').read()
setup(**kwargs)
|
ApplebyCoin/ApplebyCoin
|
refs/heads/master
|
share/demurrage_parameters_arithmetic.py
|
41
|
#!/usr/bin/env python
from __future__ import division
try:
from gmpy2 import mpq as Fraction
except ImportError:
from fractions import Fraction
TOTAL_SUPPLY = 10**16 - 1
EQ_HEIGHT = 161280
TITHE_RATIO = Fraction(4,5)
TITHE_AMOUNT = TOTAL_SUPPLY * TITHE_RATIO / EQ_HEIGHT
DEMURRAGE_RATE = 2**20
def sample_run(subsidy):
def get_block_value(height):
if height < EQ_HEIGHT:
return Fraction(TOTAL_SUPPLY, DEMURRAGE_RATE) + TITHE_AMOUNT + \
Fraction((EQ_HEIGHT - height) * subsidy, EQ_HEIGHT)
else:
return Fraction(TOTAL_SUPPLY, DEMURRAGE_RATE)
total=0
for i in xrange(EQ_HEIGHT):
total = total + get_block_value(i) - Fraction(total, DEMURRAGE_RATE)
return total
print "Performing binary search..."
low,high = 0, TOTAL_SUPPLY
while True:
if (high-low) <= 1:
break
sample = (low+high)//2
result = sample_run(sample)
if result < TOTAL_SUPPLY:
low = sample
else:
high = sample
print(high - low)
initial = low
if sample_run(initial) >= TOTAL_SUPPLY:
initial = initial - 1
print "Tithe amount: %d" % TITHE_AMOUNT
print "Initial subsidy: %d" % initial
print "Initial reward: %d" % (initial + TOTAL_SUPPLY//DEMURRAGE_RATE)
print "Final subsidy: %d" % 0
print "Final reward: %d" % (TOTAL_SUPPLY//DEMURRAGE_RATE)
print "Equilibrium: Block #%d" % EQ_HEIGHT
print "Approx delta: %f" % (initial / EQ_HEIGHT / 100000000)
print "Max coins: %d" % sample_run(initial)
|
timlinux/QGIS
|
refs/heads/master
|
python/core/auto_additions/qgsmapclippingregion.py
|
29
|
# The following has been generated automatically from src/core/qgsmapclippingregion.h
# monkey patching scoped based enum
QgsMapClippingRegion.FeatureClippingType.ClipToIntersection.__doc__ = "Clip the geometry of these features to the region prior to rendering (i.e. feature boundaries will follow the clip region)"
QgsMapClippingRegion.FeatureClippingType.ClipPainterOnly.__doc__ = "Applying clipping on the painter only (i.e. feature boundaries will be unchanged, but may be invisible where the feature falls outside the clipping region)"
QgsMapClippingRegion.FeatureClippingType.NoClipping.__doc__ = "Only render features which intersect the clipping region, but do not clip these features to the region"
QgsMapClippingRegion.FeatureClippingType.__doc__ = 'Feature clipping behavior, which controls how features from vector layers\nwill be clipped.\n\n' + '* ``ClipToIntersection``: ' + QgsMapClippingRegion.FeatureClippingType.ClipToIntersection.__doc__ + '\n' + '* ``ClipPainterOnly``: ' + QgsMapClippingRegion.FeatureClippingType.ClipPainterOnly.__doc__ + '\n' + '* ``NoClipping``: ' + QgsMapClippingRegion.FeatureClippingType.NoClipping.__doc__
# --
|
lexus42/4022322442test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_functools.py
|
727
|
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
def reduce(func,iterable,initializer=None):
args = iter(iterable)
if initializer is not None:
res = initializer
else:
res = next(args)
while True:
try:
res = func(res,next(args))
except StopIteration:
return res
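# Usage sketch (illustrative only):
#   add = lambda a, b: a + b
#   inc = partial(add, 1)          # inc(5) -> 6
#   reduce(add, [1, 2, 3], 10)     # -> 16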
|
openiitbombayx/edx-platform
|
refs/heads/master
|
lms/djangoapps/django_comment_client/tests/unicode.py
|
206
|
# coding=utf-8
class UnicodeTestMixin(object):
def test_ascii(self):
self._test_unicode_data(u"This post contains ASCII.")
def test_latin_1(self):
self._test_unicode_data(u"Thís pøst çòñtáins Lätin-1 tæxt")
def test_CJK(self):
self._test_unicode_data(u"イんノ丂 アo丂イ co刀イムノ刀丂 cフズ")
def test_non_BMP(self):
self._test_unicode_data(u"𝕋𝕙𝕚𝕤 𝕡𝕠𝕤𝕥 𝕔𝕠𝕟𝕥𝕒𝕚𝕟𝕤 𝕔𝕙𝕒𝕣𝕒𝕔𝕥𝕖𝕣𝕤 𝕠𝕦𝕥𝕤𝕚𝕕𝕖 𝕥𝕙𝕖 𝔹𝕄ℙ")
def test_special_chars(self):
self._test_unicode_data(u"\" This , post > contains < delimiter ] and [ other } special { characters ; that & may ' break things")
def test_string_interp(self):
self._test_unicode_data(u"This post contains %s string interpolation #{syntax}")
|
Dave667/service
|
refs/heads/master
|
script.module.requests/lib/requests/packages/charade/langgreekmodel.py
|
22
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
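# Example reading of the table above (illustrative): byte 0x41 ('A') maps to
# order 82, the ASCII digits 0x30-0x39 all map to 252, and most control bytes
# map to 255, matching the legend in the comments before the table.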
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences: 1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
# flake8: noqa
|
chriskiehl/Gooey
|
refs/heads/master
|
gooey/gui/util/filedrop.py
|
1
|
import wx
class FileDrop(wx.FileDropTarget):
def __init__(self, window, dropStrategy=None):
wx.FileDropTarget.__init__(self)
self.window = window
self.dropHandler = dropStrategy or self._defaultStrategy
def OnDropFiles(self, x, y, filenames):
return self.dropHandler(x, y, filenames)
def _defaultStrategy(self, x, y, filenames):
for name in filenames:
self.window.WriteText(name)
return True
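# Hypothetical usage sketch (not part of this module): attach the drop target
# to a text control so dropped file paths are written into it via the default
# strategy above.
#
#   text_ctrl = wx.TextCtrl(parent_panel)
#   text_ctrl.SetDropTarget(FileDrop(text_ctrl))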
|
naokimiyasaka/sublime-text
|
refs/heads/master
|
Backup/20130812102255/Package Control/lib/windows/ntlm/des.py
|
4
|
# This file is part of 'NTLM Authorization Proxy Server' http://sourceforge.net/projects/ntlmaps/
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/> or <http://www.gnu.org/licenses/lgpl.txt>.
import des_c
#---------------------------------------------------------------------
class DES:
des_c_obj = None
#-----------------------------------------------------------------
def __init__(self, key_str):
""
k = str_to_key56(key_str)
k = key56_to_key64(k)
key_str = ''
for i in k:
key_str += chr(i & 0xFF)
self.des_c_obj = des_c.DES(key_str)
#-----------------------------------------------------------------
def encrypt(self, plain_text):
""
return self.des_c_obj.encrypt(plain_text)
#-----------------------------------------------------------------
def decrypt(self, crypted_text):
""
return self.des_c_obj.decrypt(crypted_text)
#---------------------------------------------------------------------
#Some Helpers
#---------------------------------------------------------------------
DESException = 'DESException'
#---------------------------------------------------------------------
def str_to_key56(key_str):
""
if type(key_str) != type(''):
        #raise DESException, 'ERROR. Wrong key type.'
pass
if len(key_str) < 7:
key_str = key_str + '\000\000\000\000\000\000\000'[:(7 - len(key_str))]
key_56 = []
for i in key_str[:7]: key_56.append(ord(i))
return key_56
#---------------------------------------------------------------------
def key56_to_key64(key_56):
""
key = []
for i in range(8): key.append(0)
key[0] = key_56[0];
key[1] = ((key_56[0] << 7) & 0xFF) | (key_56[1] >> 1);
key[2] = ((key_56[1] << 6) & 0xFF) | (key_56[2] >> 2);
key[3] = ((key_56[2] << 5) & 0xFF) | (key_56[3] >> 3);
key[4] = ((key_56[3] << 4) & 0xFF) | (key_56[4] >> 4);
key[5] = ((key_56[4] << 3) & 0xFF) | (key_56[5] >> 5);
key[6] = ((key_56[5] << 2) & 0xFF) | (key_56[6] >> 6);
key[7] = (key_56[6] << 1) & 0xFF;
key = set_key_odd_parity(key)
return key
#---------------------------------------------------------------------
def set_key_odd_parity(key):
""
for i in range(len(key)):
for k in range(7):
bit = 0
t = key[i] >> k
bit = (t ^ bit) & 0x1
key[i] = (key[i] & 0xFE) | bit
return key
|
hryamzik/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_snapshot_copy.py
|
26
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: ec2_snapshot_copy
short_description: copies an EC2 snapshot and returns the new Snapshot ID.
description:
- Copies an EC2 Snapshot from a source region to a destination region.
version_added: "2.4"
options:
source_region:
description:
- The source region the Snapshot should be copied from.
required: true
source_snapshot_id:
description:
- The ID of the Snapshot in source region that should be copied.
required: true
description:
description:
- An optional human-readable string describing purpose of the new Snapshot.
encrypted:
description:
- Whether or not the destination Snapshot should be encrypted.
type: bool
default: 'no'
kms_key_id:
description:
- KMS key id used to encrypt snapshot. If not specified, defaults to EBS Customer Master Key (CMK) for that account.
wait:
description:
- Wait for the copied Snapshot to be in 'Available' state before returning.
type: bool
default: 'no'
wait_timeout:
version_added: "2.6"
description:
- How long before wait gives up, in seconds.
default: 600
tags:
description:
- A hash/dictionary of tags to add to the new Snapshot; '{"key":"value"}' and '{"key":"value","key":"value"}'
author: "Deepak Kothandan <deepak.kdy@gmail.com>"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
'''
EXAMPLES = '''
# Basic Snapshot Copy
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
# Copy Snapshot and wait until available
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
wait: yes
wait_timeout: 1200 # Default timeout is 600
register: snapshot_id
# Tagged Snapshot copy
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
tags:
Name: Snapshot-Name
# Encrypted Snapshot copy
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
encrypted: yes
# Encrypted Snapshot copy with specified key
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
encrypted: yes
kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
'''
RETURN = '''
snapshot_id:
description: snapshot id of the newly created snapshot
returned: when snapshot copy is successful
type: string
sample: "snap-e9095e8c"
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info, camel_dict_to_snake_dict)
from ansible.module_utils._text import to_native
try:
import boto3
from botocore.exceptions import ClientError, WaiterError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def copy_snapshot(module, ec2):
"""
Copies an EC2 Snapshot to another region
module : AnsibleModule object
ec2: ec2 connection object
"""
params = {
'SourceRegion': module.params.get('source_region'),
'SourceSnapshotId': module.params.get('source_snapshot_id'),
'Description': module.params.get('description')
}
if module.params.get('encrypted'):
params['Encrypted'] = True
if module.params.get('kms_key_id'):
params['KmsKeyId'] = module.params.get('kms_key_id')
try:
snapshot_id = ec2.copy_snapshot(**params)['SnapshotId']
if module.params.get('wait'):
delay = 15
# Add one to max_attempts as wait() increment
# its counter before assessing it for time.sleep()
max_attempts = (module.params.get('wait_timeout') // delay) + 1
ec2.get_waiter('snapshot_completed').wait(
SnapshotIds=[snapshot_id],
WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)
)
if module.params.get('tags'):
ec2.create_tags(
Resources=[snapshot_id],
Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()]
)
except WaiterError as we:
module.fail_json(msg='An error occurred waiting for the snapshot to become available. (%s)' % str(we), exception=traceback.format_exc())
except ClientError as ce:
module.fail_json(msg=str(ce), exception=traceback.format_exc(), **camel_dict_to_snake_dict(ce.response))
module.exit_json(changed=True, snapshot_id=snapshot_id)
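# Illustrative helper (an assumption, not part of the original module) showing
# the waiter arithmetic used above: with the defaults wait_timeout=600 and
# delay=15 the waiter polls every 15 seconds for up to 41 attempts (~10 min).
def _example_waiter_config(wait_timeout=600, delay=15):
    return dict(Delay=delay, MaxAttempts=(wait_timeout // delay) + 1)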
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
source_region=dict(required=True),
source_snapshot_id=dict(required=True),
description=dict(default=''),
encrypted=dict(type='bool', default=False, required=False),
kms_key_id=dict(type='str', required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=600),
tags=dict(type='dict')))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='botocore and boto3 are required.')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
copy_snapshot(module, client)
if __name__ == '__main__':
main()
|
vitan/django
|
refs/heads/master
|
tests/sessions_tests/tests.py
|
8
|
import base64
import os
import shutil
import string
import tempfile
import unittest
from datetime import timedelta
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import \
SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import \
SessionStore as CookieSession
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import (
JSONSerializer, PickleSerializer,
)
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import (
RequestFactory, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import patch_logger
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.six.moves import http_cookies
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.accessed = False
self.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(list(self.session.values()), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iterkeys(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.itervalues(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iteritems(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
if (hasattr(self.session, '_cache') and 'DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
raise unittest.SkipTest("Session saving tests require a real cache backend")
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail(
"The session object did not save properly. "
"Middleware may be saving cache items without namespaces."
)
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b'flaskdj:alkdjf')
with patch_logger('django.security.SuspiciousSession', 'warning') as calls:
self.assertEqual({}, self.session.decode(bad_encode))
# check that the failed decode is logged
self.assertEqual(len(calls), 1)
self.assertIn('corrupted', calls[0])
def test_actual_expiry(self):
# this doesn't work with JSONSerializer (serializing timedelta)
with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
def test_session_str(self):
"Session repr should be the session key."
self.session['x'] = 1
self.session.save()
session_key = self.session.session_key
s = Session.objects.get(session_key=session_key)
self.assertEqual(force_text(s), session_key)
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
        in the normal way
"""
self.session['x'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
# Change it
Session.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.db")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, Session.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, Session.objects.count())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, Session.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
@unittest.skipIf('DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
"Session saving tests require a real cache backend")
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# 21000 - CacheDB backend should respect SESSION_CACHE_ALIAS.
self.assertRaises(InvalidCacheBackendError, self.backend)
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super(FileSessionTests, self).setUp()
def tearDown(self):
super(FileSessionTests, self).tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an IOError - by creating
# a new session, making it unclear whether the slashes were detected.
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a/b/c")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the filesystem before clearsessions...
self.assertEqual(2, count_sessions())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertNotEqual(caches['default'].get(self.session.cache_key), None)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'session',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertEqual(caches['default'].get(self.session.cache_key), None)
self.assertNotEqual(caches['sessions'].get(self.session.cache_key), None)
class SessionMiddlewareTests(TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the value wasn't saved above.
self.assertNotIn('hello', request.session.load())
def test_session_delete_on_end(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header looks like:
# Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}=; expires=Thu, 01-Jan-1970 00:00:00 GMT; '
'Max-Age=0; Path=/'.format(settings.SESSION_COOKIE_NAME),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super(CookieSessionTests, self).test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
self.session.serializer = PickleSerializer
self.session.load()
|
YKonovalov/guesti
|
refs/heads/master
|
guesti/cloud/os/commands.py
|
1
|
# This file is part of GuestI.
#
# GuestI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GuestI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GuestI. If not, see <http://www.gnu.org/licenses/>.
import sys
import traceback
import argparse
from time import sleep, gmtime, strftime
import abc
from guesti.cloud.common import ABS_CLOUD
from guesti.envdefault import EnvDefault
import guesti.constants
from glanceclient import client as glance_client
from cinderclient import client as cinder_client
from keystoneclient.v2_0 import client as keystone_client
from novaclient import client as nova_client
import logging
LOG = logging.getLogger(guesti.constants.PROGRAM_NAME + ".os")
LOADER_NAME = "Cloud iPXE installers boot ISO image"
def add_args_os(parser):
    p_os = parser.add_argument_group('OpenStack Environment', """OpenStack credentials for accessing cloud services.
It's recommended to create a restricted user account if you are planning to
store credentials so that this tool can be run from cron. You can also
specify these options using the corresponding environment variables.""")
p_os.add_argument('--os-auth-url', action=EnvDefault, envvar='OS_AUTH_URL',
help='URL for keystone API endpoint')
p_os.add_argument('--os-region-name', action=EnvDefault, envvar='OS_REGION_NAME', required=False,
                      help='Region name to use for running the installer instance')
p_os.add_argument('--os-tenant-id', action=EnvDefault, envvar='OS_TENANT_ID',
                      help='Project id to use for running the installer instance')
p_os.add_argument('--os-tenant-name', action=EnvDefault, envvar='OS_TENANT_NAME',
                      help='Project name to use for running the installer instance')
p_os.add_argument('--os-username', action=EnvDefault, envvar='OS_USERNAME',
help='username for keystone authentication')
p_os.add_argument('--os-password', action=EnvDefault, envvar='OS_PASSWORD',
help='secret for keystone authentication')
p_os.add_argument('--os-image-url', action=EnvDefault, envvar='OS_IMAGE_URL',
help='URL for Glance image storage API endpoint')
return p_os
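# Illustrative only: the options above can also be supplied through the
# environment variables handled by EnvDefault. A hypothetical invocation
# (all values below are placeholders, not real credentials) might export:
#
#   export OS_AUTH_URL=https://keystone.example.com:5000/v2.0
#   export OS_TENANT_ID=abc123
#   export OS_TENANT_NAME=demo
#   export OS_USERNAME=guesti-robot
#   export OS_PASSWORD=secret
#   export OS_IMAGE_URL=https://glance.example.com:9292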
def add_args_to_parser(parser):
# cloud
p_cloud = parser.add_parser('os', help='OpenStack cloud')
p_command = p_cloud.add_subparsers(title='Supported commands', help=False, dest='command')
# install
p_install = p_command.add_parser('install', help='Launch and snapshot a scripted install using modified iPXE image')
    p_install_template = p_install.add_argument_group('Template', """Machine template attributes used to register the image""")
p_install_template.add_argument('--template-name', '-t', dest='template_name', required=True,
help='Name to assign to newly created template')
    p_install_instance = p_install.add_argument_group('Installer Instance Parameters', """Installation will be performed in a cloud VM instance. Here you can
adjust VM attributes according to the OS installer's system requirements.""")
p_install_instance.add_argument('--ipxe-image-id', action=EnvDefault, envvar='IPXE_IMAGE_ID',
help="""Special cloud ipxe.iso boot image snapshot is required for installers boot to succeed.
It should be modified to request iPXE script from cloud user-data URL. See more info in README.
Please specify existing snapshot-id of cloud ipxe image. You can upload and create snapshot with
os-upload-cloud-ipxe tool""")
p_install_instance.add_argument('--install-flavor-id', action=EnvDefault, envvar='INSTALL_FLAVOR_ID',
help='Type of installation machine')
p_install_instance.add_argument('--install-network-id', action=EnvDefault, envvar='INSTALL_NETWORK_ID',
help='Network to use for installation')
p_install_instance.add_argument('--virtualization-type', dest='virt_type', choices=['kvm-virtio', 'kvm-lagacy'], default="kvm-virtio",
help='Specify "kvm-lagacy" for guest with no support for VirtIO (default: kvm-virtio). Will be inherited by result template.')
    p_install_loader = p_install.add_argument_group('PXE Loader Parameters', """Required iPXE loader options for booting the OS installer from the network. These can
be either Linux boot options or multiboot options. For Linux you must specify --kernel and --initrd.
For multiboot, --kernel and one or more --module options are normally required.""")
p_install_loader.add_argument('--kernel', dest='kernel', required=True,
                                  help='URL for the installer kernel file, with kernel options to make the automatic install happen. E.g. "repo={URL} ks={URL}" for anaconda-based distros and "preseed/url={URL} ..." for debian-based distros.')
p_install_loader.add_argument('--initrd', dest='initrd',
help='URL for installer initrd file (for normal boot)')
p_install_loader.add_argument('--module', action='append', dest='modules',
help='URL(s) for installer modules file (for multiboot)')
p_install_loader.add_argument('--chain', dest='chain',
help='URL for installer chain loader file (for generic network boot)')
add_args_os(p_install)
# upload
p_upload = p_command.add_parser('upload_loader',
help='Upload to Glance storage and make a snapshot of iPXE boot image in the cloud')
p_upload_os = add_args_os(p_upload)
p_upload.add_argument('--ipxe-file', type=argparse.FileType('r'),
help='iPXE boot loader file', default="ipxe.iso")
class OS_CLOUD(ABS_CLOUD):
""" OpenStack Cloud interface"""
name = "os"
__cleanup = True
__quiet = None
__menu = None
__runid = None
template_name = None
installer_name = None
installer_image_id = None
bootdisk_size = None
virt_type = None
install_flavor_id = None
auth_url = None
tenant = None
username = None
password = None
glance_url = None
def __init__(self, args):
super(OS_CLOUD, self).__init__(args)
self.__cleanup = args.cleanup
self.__quiet = args.quiet
self.__runid = strftime("%Y%m%d-%H%M%S", gmtime())
# Cloud endpoints and credentials
self.auth_url = args.os_auth_url
self.region = args.os_region_name
self.tenant = args.os_tenant_id
self.tenant_name = args.os_tenant_name
self.username = args.os_username
self.password = args.os_password
if args.command == "upload_loader":
self.glance_url = args.os_image_url
LOG.debug("cloud: {0}, image storage: {1}".format(self.auth_url, self.glance_url))
elif args.command == "install":
# Prepare loader menu
modules = ""
if args.modules:
for m in args.modules:
modules = modules + "module " + m + "\n"
kernel = args.kernel
initrd = "initrd " + args.initrd + "\n" if args.initrd else "\n"
chain = args.chain
if chain:
self.__menu = "#!ipxe\nchain {0}\n".format(chain)
else:
self.__menu = "#!ipxe\nkernel {0}\n{1}{2}boot\n".format(kernel, initrd, modules)
LOG.debug("iPXE script:\n---\n{0}\n---\n".format(self.__menu))
# Template params
self.template_name = args.template_name + " (updated " + strftime("%Y-%m-%d", gmtime()) + ")"
LOG.debug("Template: {0}".format(self.template_name))
# Install machine params
self.installer_name = "Installer of {0}".format(args.template_name)
self.installer_image_id = args.ipxe_image_id
self.install_network_id = args.install_network_id
self.virt_type = args.virt_type
self.install_flavor_id = args.install_flavor_id
self.glance_url = args.os_image_url
LOG.debug("installer: {0}, loader: {1}, boot: {2}, virt: {3}, machine: {4}".format(
self.installer_name, self.installer_image_id, self.bootdisk_size, self.virt_type, self.install_flavor_id))
LOG.debug("cloud: {0}".format(self.auth_url))
LOG.info("Initialized")
def install(self):
""" Launch os installer, wait for it to finnish and take a snapshot """
# run time globals
        # initialize osc here so the cleanup code in the finally block can
        # safely test it even if the connection was never established
        osc = None
instance_id = None
install_ok = False
image_id = None
image_ok = False
success = False
exitcode = 0
try:
# Install
LOG.info("About to run install instance from {0} with disk {1} and name {2} ".format(
self.installer_image_id,
self.bootdisk_size,
self.installer_name))
osk = keystone_client.Client(username=self.username, password=self.password, tenant_id=self.tenant, auth_url=self.auth_url)
#if not osk.authenticate():
# LOG.error("Failed to authenticate to {0} tenant:{1} as:{2}.".format(self.auth_url, self.tenant, self.username))
# exitcode = 1
# sys.exit(exitcode)
osi = glance_client.Client("1", endpoint=self.glance_url, token=osk.auth_token)
osc = nova_client.Client('2', username=self.username, api_key=self.password, region_name=self.region, project_id=self.tenant_name, auth_url=self.auth_url, insecure=True, http_log_debug=True)
instance = osc.servers.create(name=self.installer_name,
image=self.installer_image_id,
flavor=self.install_flavor_id,
userdata=self.__menu,
nics=[{'net-id': self.install_network_id}])
try:
instance_id = instance.id
            except AttributeError:
LOG.error("Failed to run instance: {0}.".format(self.installer_name))
exitcode = 3
sys.exit(exitcode)
LOG.info("Installer launched: {0} {1}. Waiting for instance to stop...".format(
instance_id, self.installer_name))
for i in range(120):
instance_updated = osc.servers.get(instance_id)
try:
s = instance_updated.status
                except AttributeError:
LOG.error("Failed to find running instance {0} {1}.".format(instance_id, self.installer_name))
exitcode = 4
sys.exit(exitcode)
if s in ["BUILD", "ACTIVE"]:
if not self.__quiet:
pass
#sys.stdout.write('.')
#sys.stdout.flush()
elif s == "SHUTOFF":
LOG.info("Installation finnished {0} {1}".format(
instance_id, self.installer_name))
install_ok = True
break
else:
LOG.warning("Instance {0} {1} is in unexpected state: {2}.".format(instance_id, self.installer_name, s))
sleep(60)
if not install_ok:
LOG.error("Intallation {0} {1} timed out.".format(instance.id, self.installer_name))
exitcode = 5
sys.exit(exitcode)
# Snapshot
LOG.info("About to snapshot install instance {0} to {1}".format(
instance_id, self.template_name))
image = instance.create_image(self.template_name)
LOG.debug("image: {0}".format(image))
            # create_image() returns the new image id directly, so a KeyError
            # can never be raised here; check for an empty result instead.
            image_id = image
            if not image_id:
                LOG.error("Failed to create template {1} from instance {0}".format(instance_id, self.template_name))
                exitcode = 6
                sys.exit(exitcode)
LOG.info("Template {0} is creating. Waiting for image {1} to finnish copying...".format(
self.template_name, image_id))
for i in range(120):
try:
image_updated = osi.images.get(image_id)
s = image_updated.status
                except AttributeError:
                    LOG.error("Failed to find template {0}.".format(image_id))
                    exitcode = 7
                    sys.exit(exitcode)
if s in ["queued","saving"]:
LOG.info("Template {0} is copying. Waiting.".format(image_id))
elif s == "active":
LOG.info("Template {0} is ready.".format(image_id))
image_ok = True
break
else:
LOG.warning("Template is in unexpected state: {0}.".format(s))
sleep(20)
success = True
except Exception as e:
LOG.error("Install failed for {0}".format(self.template_name))
exitcode = 1
LOG.critical("{0}\n{1}\n{2}\n".format(
"-" * 3 + " Exception details " + "-" * 50, traceback.format_exc(), "-" * 60))
finally:
if self.__cleanup:
LOG.info("Cleaning up")
if instance_id:
if osc:
LOG.info("Terminating temporary (installer) instance ({0})".format(instance_id))
instance = osc.servers.get(instance_id)
instance.delete()
else:
LOG.debug("Not removing installer instance because we don't have a connection to cloud")
else:
LOG.debug("Not removing installer instance because we don't have a instance ID")
else:
LOG.warning("Leaving installer instance and disk (requested by --no-cleanup)")
if image_id:
LOG.info("-" * 60 + "\nTemplate Details:\n ID: {0}\n Name: {1}\n".format(
image_id, self.template_name))
sys.exit(exitcode)
def upload_loader(self):
""" Upload iPXE loader ISO to object storage and make a bootable snapshot. """
cloud_ipxe_image = "ipxe.iso"
snapshot_name = LOADER_NAME + " " + self.__runid
c2_file = None
image_id = None
success = False
exitcode = 0
try:
# upload
osk = keystone_client.Client(username=self.username, password=self.password, tenant_id=self.tenant, auth_url=self.auth_url)
if not osk.authenticate():
LOG.error("Failed to authenticate to {0} tenant:{1} as:{2}.".format(self.auth_url, self.tenant, self.username))
exitcode = 1
sys.exit(exitcode)
osi = glance_client.Client("1", endpoint=self.glance_url, token=osk.auth_token)
LOG.info("Uploading {0} to Glance image storage ({1} name: {2})".format(cloud_ipxe_image, self.glance_url, snapshot_name))
            data = open(cloud_ipxe_image, "rb")  # the ISO is binary data, so open it in binary mode
image = osi.images.create(name=snapshot_name,data=data,disk_format='raw',container_format='bare')
#image = osi.images.create(name=snapshot_name,data=cloud_ipxe_image,size=os.path.getsize(cloud_ipxe_image))
#meta = {'container_format': 'bare','disk_format': 'raw', 'data': data, 'is_public': True, 'min_disk': 0, 'min_ram': 0, 'name': snapshot_name, 'properties': {'distro': 'rhel'}}
#image.update(**meta)
image_id = image.id
if not image.status:
LOG.error("Upload failed ({0})".format(cloud_ipxe_image))
exitcode = 4
sys.exit(exitcode)
LOG.info("Uploaded {0} {1}".format(image_id, snapshot_name))
except Exception as e:
LOG.error("Upload failed")
exitcode = 1
LOG.critical("{0}\n{1}\n{2}\n".format(
"-" * 3 + " Exception details " + "-" * 50, traceback.format_exc(), "-" * 60))
finally:
if self.__cleanup:
LOG.info("Cleaning up")
else:
LOG.warning("Leaving temporary object (requested by --no-cleanup)")
if image_id:
LOG.info("-" * 3 + " UPLOADED\nSnapshot Details:\n ID: {0}\n Name: {1}\n".format(
image_id, snapshot_name))
sys.exit(exitcode)
|
Pretio/boto
|
refs/heads/develop
|
boto/mturk/connection.py
|
112
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import datetime
import itertools
from boto import handler
from boto import config
from boto.mturk.price import Price
import boto.mturk.notification
from boto.connection import AWSQueryConnection
from boto.exception import EC2ResponseError
from boto.resultset import ResultSet
from boto.mturk.question import QuestionForm, ExternalQuestion, HTMLQuestion
class MTurkRequestError(EC2ResponseError):
"Error for MTurk Requests"
# todo: subclass from an abstract parent of EC2ResponseError
class MTurkConnection(AWSQueryConnection):
APIVersion = '2012-03-25'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=None, debug=0,
https_connection_factory=None, security_token=None,
profile_name=None):
if not host:
if config.has_option('MTurk', 'sandbox') and config.get('MTurk', 'sandbox') == 'True':
host = 'mechanicalturk.sandbox.amazonaws.com'
else:
host = 'mechanicalturk.amazonaws.com'
self.debug = debug
super(MTurkConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, host, debug,
https_connection_factory,
security_token=security_token,
profile_name=profile_name)
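    # A minimal sketch of pointing the connection at the sandbox without
    # passing host= explicitly: the constructor above checks the boto config
    # for an [MTurk] section, e.g. in ~/.boto:
    #
    #   [MTurk]
    #   sandbox = True
    #
    # which makes it use mechanicalturk.sandbox.amazonaws.com.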
def _required_auth_capability(self):
return ['mturk']
def get_account_balance(self):
"""
"""
params = {}
return self._process_request('GetAccountBalance', params,
[('AvailableBalance', Price),
('OnHoldBalance', Price)])
def register_hit_type(self, title, description, reward, duration,
keywords=None, approval_delay=None, qual_req=None):
"""
Register a new HIT Type
title, description are strings
reward is a Price object
duration can be a timedelta, or an object castable to an int
"""
params = dict(
Title=title,
Description=description,
AssignmentDurationInSeconds=self.duration_as_seconds(duration),
)
params.update(MTurkConnection.get_price_as_price(reward).get_as_params('Reward'))
if keywords:
params['Keywords'] = self.get_keywords_as_string(keywords)
if approval_delay is not None:
d = self.duration_as_seconds(approval_delay)
params['AutoApprovalDelayInSeconds'] = d
if qual_req is not None:
params.update(qual_req.get_as_params())
return self._process_request('RegisterHITType', params,
[('HITTypeId', HITTypeId)])
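    # Usage sketch (conn is an MTurkConnection instance; values are made up):
    # register a HIT type once and reuse the returned HITTypeId when creating
    # HITs.
    #
    #   from datetime import timedelta
    #   from boto.mturk.price import Price
    #   rs = conn.register_hit_type("Tag images", "Pick the best label",
    #                               Price(0.05), timedelta(minutes=10),
    #                               keywords=['image', 'tagging'])
    #   hit_type_id = rs[0].HITTypeId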
def set_email_notification(self, hit_type, email, event_types=None):
"""
Performs a SetHITTypeNotification operation to set email
notification for a specified HIT type
"""
return self._set_notification(hit_type, 'Email', email,
'SetHITTypeNotification', event_types)
def set_rest_notification(self, hit_type, url, event_types=None):
"""
Performs a SetHITTypeNotification operation to set REST notification
for a specified HIT type
"""
return self._set_notification(hit_type, 'REST', url,
'SetHITTypeNotification', event_types)
def set_sqs_notification(self, hit_type, queue_url, event_types=None):
"""
        Performs a SetHITTypeNotification operation to set SQS notification
for a specified HIT type. Queue URL is of form:
https://queue.amazonaws.com/<CUSTOMER_ID>/<QUEUE_NAME> and can be
found when looking at the details for a Queue in the AWS Console
"""
return self._set_notification(hit_type, "SQS", queue_url,
'SetHITTypeNotification', event_types)
def send_test_event_notification(self, hit_type, url,
event_types=None,
test_event_type='Ping'):
"""
Performs a SendTestEventNotification operation with REST notification
for a specified HIT type
"""
return self._set_notification(hit_type, 'REST', url,
'SendTestEventNotification',
event_types, test_event_type)
def _set_notification(self, hit_type, transport,
destination, request_type,
event_types=None, test_event_type=None):
"""
Common operation to set notification or send a test event
notification for a specified HIT type
"""
params = {'HITTypeId': hit_type}
# from the Developer Guide:
# The 'Active' parameter is optional. If omitted, the active status of
# the HIT type's notification specification is unchanged. All HIT types
# begin with their notification specifications in the "inactive" status.
notification_params = {'Destination': destination,
'Transport': transport,
'Version': boto.mturk.notification.NotificationMessage.NOTIFICATION_VERSION,
'Active': True,
}
# add specific event types if required
if event_types:
self.build_list_params(notification_params, event_types,
'EventType')
# Set up dict of 'Notification.1.Transport' etc. values
notification_rest_params = {}
num = 1
for key in notification_params:
notification_rest_params['Notification.%d.%s' % (num, key)] = notification_params[key]
# Update main params dict
params.update(notification_rest_params)
# If test notification, specify the notification type to be tested
if test_event_type:
params.update({'TestEventType': test_event_type})
# Execute operation
return self._process_request(request_type, params)
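    # For example, _set_notification(hit_type, 'REST', url, ...) above ends up
    # sending flattened parameters of the form:
    #
    #   Notification.1.Destination = <url>
    #   Notification.1.Transport = REST
    #   Notification.1.Version = <NOTIFICATION_VERSION>
    #   Notification.1.Active = True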
def create_hit(self, hit_type=None, question=None, hit_layout=None,
lifetime=datetime.timedelta(days=7),
max_assignments=1,
title=None, description=None, keywords=None,
reward=None, duration=datetime.timedelta(days=7),
approval_delay=None, annotation=None,
questions=None, qualifications=None,
layout_params=None, response_groups=None):
"""
Creates a new HIT.
Returns a ResultSet
See: http://docs.amazonwebservices.com/AWSMechTurk/2012-03-25/AWSMturkAPI/ApiReference_CreateHITOperation.html
"""
# Handle basic required arguments and set up params dict
params = {'LifetimeInSeconds':
self.duration_as_seconds(lifetime),
'MaxAssignments': max_assignments,
}
# handle single or multiple questions or layouts
neither = question is None and questions is None
if hit_layout is None:
both = question is not None and questions is not None
if neither or both:
raise ValueError("Must specify question (single Question instance) or questions (list or QuestionForm instance), but not both")
if question:
questions = [question]
question_param = QuestionForm(questions)
if isinstance(question, QuestionForm):
question_param = question
elif isinstance(question, ExternalQuestion):
question_param = question
elif isinstance(question, HTMLQuestion):
question_param = question
params['Question'] = question_param.get_as_xml()
else:
if not neither:
raise ValueError("Must not specify question (single Question instance) or questions (list or QuestionForm instance) when specifying hit_layout")
params['HITLayoutId'] = hit_layout
if layout_params:
params.update(layout_params.get_as_params())
# if hit type specified then add it
# else add the additional required parameters
if hit_type:
params['HITTypeId'] = hit_type
else:
# Handle keywords
final_keywords = MTurkConnection.get_keywords_as_string(keywords)
# Handle price argument
final_price = MTurkConnection.get_price_as_price(reward)
final_duration = self.duration_as_seconds(duration)
additional_params = dict(
Title=title,
Description=description,
Keywords=final_keywords,
AssignmentDurationInSeconds=final_duration,
)
additional_params.update(final_price.get_as_params('Reward'))
if approval_delay is not None:
d = self.duration_as_seconds(approval_delay)
additional_params['AutoApprovalDelayInSeconds'] = d
# add these params to the others
params.update(additional_params)
# add the annotation if specified
if annotation is not None:
params['RequesterAnnotation'] = annotation
# Add the Qualifications if specified
if qualifications is not None:
params.update(qualifications.get_as_params())
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
# Submit
return self._process_request('CreateHIT', params, [('HIT', HIT)])
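    # A minimal usage sketch (conn is an MTurkConnection; URL and values are
    # illustrative): create a HIT from an ExternalQuestion pointing at your
    # own form.
    #
    #   from boto.mturk.question import ExternalQuestion
    #   q = ExternalQuestion(external_url="https://example.com/task", frame_height=600)
    #   result = conn.create_hit(question=q, title="Label an image",
    #                            description="Choose the best label",
    #                            reward=0.05, max_assignments=3,
    #                            keywords=['image', 'label'])
    #   hit_id = result[0].HITId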
def change_hit_type_of_hit(self, hit_id, hit_type):
"""
Change the HIT type of an existing HIT. Note that the reward associated
with the new HIT type must match the reward of the current HIT type in
order for the operation to be valid.
:type hit_id: str
:type hit_type: str
"""
params = {'HITId': hit_id,
'HITTypeId': hit_type}
return self._process_request('ChangeHITTypeOfHIT', params)
def get_reviewable_hits(self, hit_type=None, status='Reviewable',
sort_by='Expiration', sort_direction='Ascending',
page_size=10, page_number=1):
"""
Retrieve the HITs that have a status of Reviewable, or HITs that
have a status of Reviewing, and that belong to the Requester
calling the operation.
"""
params = {'Status': status,
'SortProperty': sort_by,
'SortDirection': sort_direction,
'PageSize': page_size,
'PageNumber': page_number}
# Handle optional hit_type argument
if hit_type is not None:
params.update({'HITTypeId': hit_type})
return self._process_request('GetReviewableHITs', params,
[('HIT', HIT)])
@staticmethod
def _get_pages(page_size, total_records):
"""
Given a page size (records per page) and a total number of
records, return the page numbers to be retrieved.
"""
pages = total_records / page_size + bool(total_records % page_size)
return list(range(1, pages + 1))
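    # e.g. _get_pages(page_size=100, total_records=250) -> [1, 2, 3]
    # (two full pages plus one partial page).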
def get_all_hits(self):
"""
Return all of a Requester's HITs
Despite what search_hits says, it does not return all hits, but
instead returns a page of hits. This method will pull the hits
from the server 100 at a time, but will yield the results
iteratively, so subsequent requests are made on demand.
"""
page_size = 100
search_rs = self.search_hits(page_size=page_size)
total_records = int(search_rs.TotalNumResults)
get_page_hits = lambda page: self.search_hits(page_size=page_size, page_number=page)
page_nums = self._get_pages(page_size, total_records)
hit_sets = itertools.imap(get_page_hits, page_nums)
return itertools.chain.from_iterable(hit_sets)
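    # Typical usage (sketch): iterate lazily over every HIT belonging to the
    # account; pages are fetched on demand rather than all at once.
    #
    #   for hit in conn.get_all_hits():
    #       print(hit.HITId)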
def search_hits(self, sort_by='CreationTime', sort_direction='Ascending',
page_size=10, page_number=1, response_groups=None):
"""
Return a page of a Requester's HITs, on behalf of the Requester.
The operation returns HITs of any status, except for HITs that
have been disposed with the DisposeHIT operation.
Note:
The SearchHITs operation does not accept any search parameters
that filter the results.
"""
params = {'SortProperty': sort_by,
'SortDirection': sort_direction,
'PageSize': page_size,
'PageNumber': page_number}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('SearchHITs', params, [('HIT', HIT)])
def get_assignment(self, assignment_id, response_groups=None):
"""
Retrieves an assignment using the assignment's ID. Requesters can only
retrieve their own assignments, and only assignments whose related HIT
has not been disposed.
The returned ResultSet will have the following attributes:
Request
This element is present only if the Request ResponseGroup
is specified.
Assignment
The assignment. The response includes one Assignment object.
HIT
The HIT associated with this assignment. The response
includes one HIT object.
"""
params = {'AssignmentId': assignment_id}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('GetAssignment', params,
[('Assignment', Assignment),
('HIT', HIT)])
def get_assignments(self, hit_id, status=None,
sort_by='SubmitTime', sort_direction='Ascending',
page_size=10, page_number=1, response_groups=None):
"""
Retrieves completed assignments for a HIT.
Use this operation to retrieve the results for a HIT.
The returned ResultSet will have the following attributes:
NumResults
The number of assignments on the page in the filtered results
list, equivalent to the number of assignments being returned
by this call.
A non-negative integer, as a string.
PageNumber
The number of the page in the filtered results list being
returned.
A positive integer, as a string.
TotalNumResults
The total number of HITs in the filtered results list based
on this call.
A non-negative integer, as a string.
The ResultSet will contain zero or more Assignment objects
"""
params = {'HITId': hit_id,
'SortProperty': sort_by,
'SortDirection': sort_direction,
'PageSize': page_size,
'PageNumber': page_number}
if status is not None:
params['AssignmentStatus'] = status
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('GetAssignmentsForHIT', params,
[('Assignment', Assignment)])
def approve_assignment(self, assignment_id, feedback=None):
"""
"""
params = {'AssignmentId': assignment_id}
if feedback:
params['RequesterFeedback'] = feedback
return self._process_request('ApproveAssignment', params)
def reject_assignment(self, assignment_id, feedback=None):
"""
"""
params = {'AssignmentId': assignment_id}
if feedback:
params['RequesterFeedback'] = feedback
return self._process_request('RejectAssignment', params)
def approve_rejected_assignment(self, assignment_id, feedback=None):
"""
"""
params = {'AssignmentId': assignment_id}
if feedback:
params['RequesterFeedback'] = feedback
return self._process_request('ApproveRejectedAssignment', params)
def get_file_upload_url(self, assignment_id, question_identifier):
"""
Generates and returns a temporary URL to an uploaded file. The
temporary URL is used to retrieve the file as an answer to a
FileUploadAnswer question, it is valid for 60 seconds.
Will have a FileUploadURL attribute as per the API Reference.
"""
params = {'AssignmentId': assignment_id,
'QuestionIdentifier': question_identifier}
return self._process_request('GetFileUploadURL', params,
[('FileUploadURL', FileUploadURL)])
def get_hit(self, hit_id, response_groups=None):
"""
"""
params = {'HITId': hit_id}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('GetHIT', params, [('HIT', HIT)])
def set_reviewing(self, hit_id, revert=None):
"""
Update a HIT with a status of Reviewable to have a status of Reviewing,
or reverts a Reviewing HIT back to the Reviewable status.
Only HITs with a status of Reviewable can be updated with a status of
Reviewing. Similarly, only Reviewing HITs can be reverted back to a
status of Reviewable.
"""
params = {'HITId': hit_id}
if revert:
params['Revert'] = revert
return self._process_request('SetHITAsReviewing', params)
def disable_hit(self, hit_id, response_groups=None):
"""
Remove a HIT from the Mechanical Turk marketplace, approves all
submitted assignments that have not already been approved or rejected,
and disposes of the HIT and all assignment data.
Assignments for the HIT that have already been submitted, but not yet
approved or rejected, will be automatically approved. Assignments in
progress at the time of the call to DisableHIT will be approved once
the assignments are submitted. You will be charged for approval of
these assignments. DisableHIT completely disposes of the HIT and
all submitted assignment data. Assignment results data cannot be
retrieved for a HIT that has been disposed.
It is not possible to re-enable a HIT once it has been disabled.
To make the work from a disabled HIT available again, create a new HIT.
"""
params = {'HITId': hit_id}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('DisableHIT', params)
def dispose_hit(self, hit_id):
"""
Dispose of a HIT that is no longer needed.
Only HITs in the "reviewable" state, with all submitted
assignments approved or rejected, can be disposed. A Requester
can call GetReviewableHITs to determine which HITs are
reviewable, then call GetAssignmentsForHIT to retrieve the
assignments. Disposing of a HIT removes the HIT from the
results of a call to GetReviewableHITs. """
params = {'HITId': hit_id}
return self._process_request('DisposeHIT', params)
def expire_hit(self, hit_id):
"""
Expire a HIT that is no longer needed.
The effect is identical to the HIT expiring on its own. The
HIT no longer appears on the Mechanical Turk web site, and no
new Workers are allowed to accept the HIT. Workers who have
accepted the HIT prior to expiration are allowed to complete
it or return it, or allow the assignment duration to elapse
(abandon the HIT). Once all remaining assignments have been
submitted, the expired HIT becomes"reviewable", and will be
returned by a call to GetReviewableHITs.
"""
params = {'HITId': hit_id}
return self._process_request('ForceExpireHIT', params)
def extend_hit(self, hit_id, assignments_increment=None,
expiration_increment=None):
"""
Increase the maximum number of assignments, or extend the
expiration date, of an existing HIT.
NOTE: If a HIT has a status of Reviewable and the HIT is
extended to make it Available, the HIT will not be returned by
GetReviewableHITs, and its submitted assignments will not be
returned by GetAssignmentsForHIT, until the HIT is Reviewable
again. Assignment auto-approval will still happen on its
original schedule, even if the HIT has been extended. Be sure
to retrieve and approve (or reject) submitted assignments
before extending the HIT, if so desired.
"""
# must provide assignment *or* expiration increment
if (assignments_increment is None and expiration_increment is None) or \
(assignments_increment is not None and expiration_increment is not None):
raise ValueError("Must specify either assignments_increment or expiration_increment, but not both")
params = {'HITId': hit_id}
if assignments_increment:
params['MaxAssignmentsIncrement'] = assignments_increment
if expiration_increment:
params['ExpirationIncrementInSeconds'] = expiration_increment
return self._process_request('ExtendHIT', params)
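    # For example (sketch): add 50 more assignments to an existing HIT, or,
    # in a separate call, push its expiration out by one day. The two
    # increments cannot be combined in a single call.
    #
    #   conn.extend_hit(hit_id, assignments_increment=50)
    #   conn.extend_hit(hit_id, expiration_increment=86400)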
def get_help(self, about, help_type='Operation'):
"""
        Return information about the Mechanical Turk Service
        operations and response groups. NOTE: this is of limited use,
        as it just returns the URL of the relevant documentation.
help_type: either 'Operation' or 'ResponseGroup'
"""
params = {'About': about, 'HelpType': help_type}
return self._process_request('Help', params)
def grant_bonus(self, worker_id, assignment_id, bonus_price, reason):
"""
Issues a payment of money from your account to a Worker. To
be eligible for a bonus, the Worker must have submitted
results for one of your HITs, and have had those results
approved or rejected. This payment happens separately from the
reward you pay to the Worker when you approve the Worker's
assignment. The Bonus must be passed in as an instance of the
Price object.
"""
params = bonus_price.get_as_params('BonusAmount', 1)
params['WorkerId'] = worker_id
params['AssignmentId'] = assignment_id
params['Reason'] = reason
return self._process_request('GrantBonus', params)
def block_worker(self, worker_id, reason):
"""
Block a worker from working on my tasks.
"""
params = {'WorkerId': worker_id, 'Reason': reason}
return self._process_request('BlockWorker', params)
def unblock_worker(self, worker_id, reason):
"""
Unblock a worker from working on my tasks.
"""
params = {'WorkerId': worker_id, 'Reason': reason}
return self._process_request('UnblockWorker', params)
def notify_workers(self, worker_ids, subject, message_text):
"""
Send a text message to workers.
"""
params = {'Subject': subject,
'MessageText': message_text}
self.build_list_params(params, worker_ids, 'WorkerId')
return self._process_request('NotifyWorkers', params)
def create_qualification_type(self,
name,
description,
status,
keywords=None,
retry_delay=None,
test=None,
answer_key=None,
answer_key_xml=None,
test_duration=None,
auto_granted=False,
auto_granted_value=1):
"""
Create a new Qualification Type.
name: This will be visible to workers and must be unique for a
given requester.
description: description shown to workers. Max 2000 characters.
status: 'Active' or 'Inactive'
keywords: list of keyword strings or comma separated string.
Max length of 1000 characters when concatenated with commas.
retry_delay: number of seconds after requesting a
qualification the worker must wait before they can ask again.
If not specified, workers can only request this qualification
once.
test: a QuestionForm
answer_key: an XML string of your answer key, for automatically
scored qualification tests.
(Consider implementing an AnswerKey class for this to support.)
test_duration: the number of seconds a worker has to complete the test.
auto_granted: if True, requests for the Qualification are granted
immediately. Can't coexist with a test.
auto_granted_value: auto_granted qualifications are given this value.
"""
params = {'Name': name,
'Description': description,
'QualificationTypeStatus': status,
}
if retry_delay is not None:
params['RetryDelayInSeconds'] = retry_delay
if test is not None:
assert(isinstance(test, QuestionForm))
assert(test_duration is not None)
params['Test'] = test.get_as_xml()
if test_duration is not None:
params['TestDurationInSeconds'] = test_duration
if answer_key is not None:
if isinstance(answer_key, basestring):
params['AnswerKey'] = answer_key # xml
else:
raise TypeError
# Eventually someone will write an AnswerKey class.
if auto_granted:
assert(test is None)
params['AutoGranted'] = True
params['AutoGrantedValue'] = auto_granted_value
if keywords:
params['Keywords'] = self.get_keywords_as_string(keywords)
return self._process_request('CreateQualificationType', params,
[('QualificationType',
QualificationType)])
def get_qualification_type(self, qualification_type_id):
params = {'QualificationTypeId': qualification_type_id }
return self._process_request('GetQualificationType', params,
[('QualificationType', QualificationType)])
def get_all_qualifications_for_qual_type(self, qualification_type_id):
page_size = 100
search_qual = self.get_qualifications_for_qualification_type(qualification_type_id)
total_records = int(search_qual.TotalNumResults)
get_page_quals = lambda page: self.get_qualifications_for_qualification_type(qualification_type_id = qualification_type_id, page_size=page_size, page_number = page)
page_nums = self._get_pages(page_size, total_records)
qual_sets = itertools.imap(get_page_quals, page_nums)
return itertools.chain.from_iterable(qual_sets)
def get_qualifications_for_qualification_type(self, qualification_type_id, page_size=100, page_number = 1):
params = {'QualificationTypeId': qualification_type_id,
'PageSize': page_size,
'PageNumber': page_number}
return self._process_request('GetQualificationsForQualificationType', params,
[('Qualification', Qualification)])
def update_qualification_type(self, qualification_type_id,
description=None,
status=None,
retry_delay=None,
test=None,
answer_key=None,
test_duration=None,
auto_granted=None,
auto_granted_value=None):
params = {'QualificationTypeId': qualification_type_id}
if description is not None:
params['Description'] = description
if status is not None:
params['QualificationTypeStatus'] = status
if retry_delay is not None:
params['RetryDelayInSeconds'] = retry_delay
if test is not None:
assert(isinstance(test, QuestionForm))
params['Test'] = test.get_as_xml()
if test_duration is not None:
params['TestDurationInSeconds'] = test_duration
if answer_key is not None:
if isinstance(answer_key, basestring):
params['AnswerKey'] = answer_key # xml
else:
raise TypeError
# Eventually someone will write an AnswerKey class.
if auto_granted is not None:
params['AutoGranted'] = auto_granted
if auto_granted_value is not None:
params['AutoGrantedValue'] = auto_granted_value
return self._process_request('UpdateQualificationType', params,
[('QualificationType', QualificationType)])
def dispose_qualification_type(self, qualification_type_id):
"""TODO: Document."""
params = {'QualificationTypeId': qualification_type_id}
return self._process_request('DisposeQualificationType', params)
def search_qualification_types(self, query=None, sort_by='Name',
sort_direction='Ascending', page_size=10,
page_number=1, must_be_requestable=True,
must_be_owned_by_caller=True):
"""TODO: Document."""
params = {'Query': query,
'SortProperty': sort_by,
'SortDirection': sort_direction,
'PageSize': page_size,
'PageNumber': page_number,
'MustBeRequestable': must_be_requestable,
'MustBeOwnedByCaller': must_be_owned_by_caller}
return self._process_request('SearchQualificationTypes', params,
[('QualificationType', QualificationType)])
def get_qualification_requests(self, qualification_type_id,
sort_by='Expiration',
sort_direction='Ascending', page_size=10,
page_number=1):
"""TODO: Document."""
params = {'QualificationTypeId': qualification_type_id,
'SortProperty': sort_by,
'SortDirection': sort_direction,
'PageSize': page_size,
'PageNumber': page_number}
return self._process_request('GetQualificationRequests', params,
[('QualificationRequest', QualificationRequest)])
def grant_qualification(self, qualification_request_id, integer_value=1):
"""TODO: Document."""
params = {'QualificationRequestId': qualification_request_id,
'IntegerValue': integer_value}
return self._process_request('GrantQualification', params)
def revoke_qualification(self, subject_id, qualification_type_id,
reason=None):
"""TODO: Document."""
params = {'SubjectId': subject_id,
'QualificationTypeId': qualification_type_id,
'Reason': reason}
return self._process_request('RevokeQualification', params)
def assign_qualification(self, qualification_type_id, worker_id,
value=1, send_notification=True):
params = {'QualificationTypeId': qualification_type_id,
'WorkerId' : worker_id,
'IntegerValue' : value,
'SendNotification' : send_notification}
return self._process_request('AssignQualification', params)
def get_qualification_score(self, qualification_type_id, worker_id):
"""TODO: Document."""
params = {'QualificationTypeId' : qualification_type_id,
'SubjectId' : worker_id}
return self._process_request('GetQualificationScore', params,
[('Qualification', Qualification)])
def update_qualification_score(self, qualification_type_id, worker_id,
value):
"""TODO: Document."""
params = {'QualificationTypeId' : qualification_type_id,
'SubjectId' : worker_id,
'IntegerValue' : value}
return self._process_request('UpdateQualificationScore', params)
def _process_request(self, request_type, params, marker_elems=None):
"""
Helper to process the xml response from AWS
"""
params['Operation'] = request_type
response = self.make_request(None, params, verb='POST')
return self._process_response(response, marker_elems)
def _process_response(self, response, marker_elems=None):
"""
Helper to process the xml response from AWS
"""
body = response.read()
if self.debug == 2:
print(body)
if '<Errors>' not in body:
rs = ResultSet(marker_elems)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
else:
raise MTurkRequestError(response.status, response.reason, body)
@staticmethod
def get_keywords_as_string(keywords):
"""
Returns a comma+space-separated string of keywords from either
a list or a string
"""
if isinstance(keywords, list):
keywords = ', '.join(keywords)
if isinstance(keywords, str):
final_keywords = keywords
elif isinstance(keywords, unicode):
final_keywords = keywords.encode('utf-8')
elif keywords is None:
final_keywords = ""
else:
raise TypeError("keywords argument must be a string or a list of strings; got a %s" % type(keywords))
return final_keywords
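    # e.g. get_keywords_as_string(['cats', 'dogs']) -> 'cats, dogs'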
@staticmethod
def get_price_as_price(reward):
"""
Returns a Price data structure from either a float or a Price
"""
if isinstance(reward, Price):
final_price = reward
else:
final_price = Price(reward)
return final_price
@staticmethod
def duration_as_seconds(duration):
if isinstance(duration, datetime.timedelta):
duration = duration.days * 86400 + duration.seconds
try:
duration = int(duration)
except TypeError:
raise TypeError("Duration must be a timedelta or int-castable, got %s" % type(duration))
return duration
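    # e.g. duration_as_seconds(datetime.timedelta(days=1, seconds=30)) -> 86430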
class BaseAutoResultElement(object):
"""
Base class to automatically add attributes when parsing XML
"""
def __init__(self, connection):
pass
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class HIT(BaseAutoResultElement):
"""
Class to extract a HIT structure from a response (used in ResultSet)
Will have attributes named as per the Developer Guide,
e.g. HITId, HITTypeId, CreationTime
"""
# property helper to determine if HIT has expired
def _has_expired(self):
""" Has this HIT expired yet? """
expired = False
if hasattr(self, 'Expiration'):
now = datetime.datetime.utcnow()
expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')
expired = (now >= expiration)
else:
raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!")
return expired
# are we there yet?
expired = property(_has_expired)
class FileUploadURL(BaseAutoResultElement):
"""
Class to extract an FileUploadURL structure from a response
"""
pass
class HITTypeId(BaseAutoResultElement):
"""
Class to extract an HITTypeId structure from a response
"""
pass
class Qualification(BaseAutoResultElement):
"""
Class to extract an Qualification structure from a response (used in
ResultSet)
Will have attributes named as per the Developer Guide such as
QualificationTypeId, IntegerValue. Does not seem to contain GrantTime.
"""
pass
class QualificationType(BaseAutoResultElement):
"""
Class to extract an QualificationType structure from a response (used in
ResultSet)
Will have attributes named as per the Developer Guide,
e.g. QualificationTypeId, CreationTime, Name, etc
"""
pass
class QualificationRequest(BaseAutoResultElement):
"""
Class to extract an QualificationRequest structure from a response (used in
ResultSet)
Will have attributes named as per the Developer Guide,
e.g. QualificationRequestId, QualificationTypeId, SubjectId, etc
"""
def __init__(self, connection):
super(QualificationRequest, self).__init__(connection)
self.answers = []
def endElement(self, name, value, connection):
        # the answer consists of embedded XML, so it needs to be parsed independently
if name == 'Answer':
answer_rs = ResultSet([('Answer', QuestionFormAnswer)])
h = handler.XmlHandler(answer_rs, connection)
value = connection.get_utf8_value(value)
xml.sax.parseString(value, h)
self.answers.append(answer_rs)
else:
super(QualificationRequest, self).endElement(name, value, connection)
class Assignment(BaseAutoResultElement):
"""
Class to extract an Assignment structure from a response (used in
ResultSet)
Will have attributes named as per the Developer Guide,
e.g. AssignmentId, WorkerId, HITId, Answer, etc
"""
def __init__(self, connection):
super(Assignment, self).__init__(connection)
self.answers = []
def endElement(self, name, value, connection):
        # the answer consists of embedded XML, so it needs to be parsed independently
if name == 'Answer':
answer_rs = ResultSet([('Answer', QuestionFormAnswer)])
h = handler.XmlHandler(answer_rs, connection)
value = connection.get_utf8_value(value)
xml.sax.parseString(value, h)
self.answers.append(answer_rs)
else:
super(Assignment, self).endElement(name, value, connection)
class QuestionFormAnswer(BaseAutoResultElement):
"""
Class to extract Answers from inside the embedded XML
QuestionFormAnswers element inside the Answer element which is
part of the Assignment and QualificationRequest structures
A QuestionFormAnswers element contains an Answer element for each
question in the HIT or Qualification test for which the Worker
provided an answer. Each Answer contains a QuestionIdentifier
element whose value corresponds to the QuestionIdentifier of a
Question in the QuestionForm. See the QuestionForm data structure
for more information about questions and answer specifications.
If the question expects a free-text answer, the Answer element
contains a FreeText element. This element contains the Worker's
answer
*NOTE* - currently really only supports free-text and selection answers
"""
def __init__(self, connection):
super(QuestionFormAnswer, self).__init__(connection)
self.fields = []
self.qid = None
def endElement(self, name, value, connection):
if name == 'QuestionIdentifier':
self.qid = value
elif name in ['FreeText', 'SelectionIdentifier', 'OtherSelectionText'] and self.qid:
self.fields.append(value)
|
technologiescollege/Blockly-rduino-communication
|
refs/heads/master
|
scripts_XP/Lib/idlelib/idle_test/test_text.py
|
14
|
# Test mock_tk.Text class against tkinter.Text class by running same tests with both.
import unittest
from test.support import requires
from _tkinter import TclError
class TextTest(object):
hw = 'hello\nworld' # usual initial insert after initialization
hwn = hw+'\n' # \n present at initialization, before insert
Text = None
def setUp(self):
self.text = self.Text()
def test_init(self):
self.assertEqual(self.text.get('1.0'), '\n')
self.assertEqual(self.text.get('end'), '')
def test_index_empty(self):
index = self.text.index
for dex in (-1.0, 0.3, '1.-1', '1.0', '1.0 lineend', '1.end', '1.33',
'insert'):
self.assertEqual(index(dex), '1.0')
for dex in 'end', 2.0, '2.1', '33.44':
self.assertEqual(index(dex), '2.0')
def test_index_data(self):
index = self.text.index
self.text.insert('1.0', self.hw)
for dex in -1.0, 0.3, '1.-1', '1.0':
self.assertEqual(index(dex), '1.0')
for dex in '1.0 lineend', '1.end', '1.33':
self.assertEqual(index(dex), '1.5')
for dex in 'end', '33.44':
self.assertEqual(index(dex), '3.0')
def test_get(self):
get = self.text.get
Equal = self.assertEqual
self.text.insert('1.0', self.hw)
Equal(get('end'), '')
Equal(get('end', 'end'), '')
Equal(get('1.0'), 'h')
Equal(get('1.0', '1.1'), 'h')
Equal(get('1.0', '1.3'), 'hel')
Equal(get('1.1', '1.3'), 'el')
Equal(get('1.0', '1.0 lineend'), 'hello')
Equal(get('1.0', '1.10'), 'hello')
Equal(get('1.0 lineend'), '\n')
Equal(get('1.1', '2.3'), 'ello\nwor')
Equal(get('1.0', '2.5'), self.hw)
Equal(get('1.0', 'end'), self.hwn)
Equal(get('0.0', '5.0'), self.hwn)
def test_insert(self):
insert = self.text.insert
get = self.text.get
Equal = self.assertEqual
insert('1.0', self.hw)
Equal(get('1.0', 'end'), self.hwn)
insert('1.0', '') # nothing
Equal(get('1.0', 'end'), self.hwn)
insert('1.0', '*')
Equal(get('1.0', 'end'), '*hello\nworld\n')
insert('1.0 lineend', '*')
Equal(get('1.0', 'end'), '*hello*\nworld\n')
insert('2.3', '*')
Equal(get('1.0', 'end'), '*hello*\nwor*ld\n')
insert('end', 'x')
Equal(get('1.0', 'end'), '*hello*\nwor*ldx\n')
insert('1.4', 'x\n')
Equal(get('1.0', 'end'), '*helx\nlo*\nwor*ldx\n')
def test_no_delete(self):
# if index1 == 'insert' or 'end' or >= end, there is no deletion
delete = self.text.delete
get = self.text.get
Equal = self.assertEqual
self.text.insert('1.0', self.hw)
delete('insert')
Equal(get('1.0', 'end'), self.hwn)
delete('end')
Equal(get('1.0', 'end'), self.hwn)
delete('insert', 'end')
Equal(get('1.0', 'end'), self.hwn)
delete('insert', '5.5')
Equal(get('1.0', 'end'), self.hwn)
delete('1.4', '1.0')
Equal(get('1.0', 'end'), self.hwn)
delete('1.4', '1.4')
Equal(get('1.0', 'end'), self.hwn)
def test_delete_char(self):
delete = self.text.delete
get = self.text.get
Equal = self.assertEqual
self.text.insert('1.0', self.hw)
delete('1.0')
Equal(get('1.0', '1.end'), 'ello')
delete('1.0', '1.1')
Equal(get('1.0', '1.end'), 'llo')
# delete \n and combine 2 lines into 1
delete('1.end')
Equal(get('1.0', '1.end'), 'lloworld')
self.text.insert('1.3', '\n')
delete('1.10')
Equal(get('1.0', '1.end'), 'lloworld')
self.text.insert('1.3', '\n')
delete('1.3', '2.0')
Equal(get('1.0', '1.end'), 'lloworld')
def test_delete_slice(self):
delete = self.text.delete
get = self.text.get
Equal = self.assertEqual
self.text.insert('1.0', self.hw)
delete('1.0', '1.0 lineend')
Equal(get('1.0', 'end'), '\nworld\n')
delete('1.0', 'end')
Equal(get('1.0', 'end'), '\n')
self.text.insert('1.0', self.hw)
delete('1.0', '2.0')
Equal(get('1.0', 'end'), 'world\n')
delete('1.0', 'end')
Equal(get('1.0', 'end'), '\n')
self.text.insert('1.0', self.hw)
delete('1.2', '2.3')
Equal(get('1.0', 'end'), 'held\n')
def test_multiple_lines(self): # insert and delete
self.text.insert('1.0', 'hello')
self.text.insert('1.3', '1\n2\n3\n4\n5')
self.assertEqual(self.text.get('1.0', 'end'), 'hel1\n2\n3\n4\n5lo\n')
self.text.delete('1.3', '5.1')
self.assertEqual(self.text.get('1.0', 'end'), 'hello\n')
def test_compare(self):
compare = self.text.compare
Equal = self.assertEqual
# need data so indexes not squished to 1,0
self.text.insert('1.0', 'First\nSecond\nThird\n')
self.assertRaises(TclError, compare, '2.2', 'op', '2.2')
for op, less1, less0, equal, greater0, greater1 in (
('<', True, True, False, False, False),
('<=', True, True, True, False, False),
('>', False, False, False, True, True),
('>=', False, False, True, True, True),
('==', False, False, True, False, False),
('!=', True, True, False, True, True),
):
Equal(compare('1.1', op, '2.2'), less1, op)
Equal(compare('2.1', op, '2.2'), less0, op)
Equal(compare('2.2', op, '2.2'), equal, op)
Equal(compare('2.3', op, '2.2'), greater0, op)
Equal(compare('3.3', op, '2.2'), greater1, op)
class MockTextTest(TextTest, unittest.TestCase):
@classmethod
def setUpClass(cls):
from idlelib.idle_test.mock_tk import Text
cls.Text = Text
def test_decode(self):
# test endflags (-1, 0) not tested by test_index (which uses +1)
decode = self.text._decode
Equal = self.assertEqual
self.text.insert('1.0', self.hw)
Equal(decode('end', -1), (2, 5))
Equal(decode('3.1', -1), (2, 5))
Equal(decode('end', 0), (2, 6))
Equal(decode('3.1', 0), (2, 6))
class TkTextTest(TextTest, unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
from tkinter import Tk, Text
cls.Text = Text
cls.root = Tk()
@classmethod
def tearDownClass(cls):
cls.root.destroy()
del cls.root
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
|
hlmnrmr/superdesk-core
|
refs/heads/master
|
superdesk/io/commands/__init__.py
|
395
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
|
JoshuaRBogart/unreal_tools
|
refs/heads/master
|
vertex_animation.py
|
1
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Vertex Animation",
"author": "Joshua Bogart",
"version": (1, 0),
"blender": (2, 83, 0),
"location": "View3D > Sidebar > Unreal Tools Tab",
"description": "A tool for storing per frame vertex data for use in a vertex shader.",
"warning": "",
"doc_url": "",
"category": "Unreal Tools",
}
import bpy
import bmesh
def get_per_frame_mesh_data(context, data, objects):
"""Return a list of combined mesh data per frame"""
meshes = []
for i in frame_range(context.scene):
context.scene.frame_set(i)
depsgraph = context.evaluated_depsgraph_get()
bm = bmesh.new()
for ob in objects:
eval_object = ob.evaluated_get(depsgraph)
me = data.meshes.new_from_object(eval_object)
me.transform(ob.matrix_world)
bm.from_mesh(me)
data.meshes.remove(me)
me = data.meshes.new("mesh")
bm.to_mesh(me)
bm.free()
me.calc_normals()
meshes.append(me)
return meshes
def create_export_mesh_object(context, data, me):
"""Return a mesh object with correct UVs"""
while len(me.uv_layers) < 2:
me.uv_layers.new()
uv_layer = me.uv_layers[1]
uv_layer.name = "vertex_anim"
for loop in me.loops:
uv_layer.data[loop.index].uv = (
(loop.vertex_index + 0.5)/len(me.vertices), 128/255
)
ob = data.objects.new("export_mesh", me)
context.scene.collection.objects.link(ob)
return ob
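# Explanatory note (not part of the original add-on): the second UV channel created above
# maps each vertex to the horizontal centre of its own texel, e.g. with 4 vertices vertex 0
# gets u = (0 + 0.5) / 4 = 0.125 and vertex 3 gets 0.875, so a vertex shader can look up the
# baked offset/normal textures per vertex; v is pinned to 128/255 by the add-on.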
def get_vertex_data(data, meshes):
"""Return lists of vertex offsets and normals from a list of mesh data"""
original = meshes[0].vertices
offsets = []
normals = []
for me in reversed(meshes):
for v in me.vertices:
offset = v.co - original[v.index].co
x, y, z = offset
offsets.extend((x, -y, z, 1))
x, y, z = v.normal
normals.extend(((x + 1) * 0.5, (-y + 1) * 0.5, (z + 1) * 0.5, 1))
if not me.users:
data.meshes.remove(me)
return offsets, normals
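# Explanatory note (not part of the original add-on): values are flattened as RGBA floats
# (4 per vertex). Frames are visited in reverse because Blender fills Image.pixels from the
# bottom row upward, so the first animation frame ends up in the top row of the baked texture.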
def frame_range(scene):
"""Return a range object with with scene's frame start, end, and step"""
return range(scene.frame_start, scene.frame_end, scene.frame_step)
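# Explanatory note: like Python's range(), frame_range() excludes scene.frame_end itself,
# so the frame count used below is len(frame_range(scene)) frames starting at frame_start.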
def bake_vertex_data(context, data, offsets, normals, size):
"""Stores vertex offsets and normals in seperate image textures"""
width, height = size
offset_texture = data.images.new(
name="offsets",
width=width,
height=height,
alpha=True,
float_buffer=True
)
normal_texture = data.images.new(
name="normals",
width=width,
height=height,
alpha=True
)
offset_texture.pixels = offsets
normal_texture.pixels = normals
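# Explanatory note: `size` is (vertex_count, frame_count), so each texture row holds one
# frame and each column one vertex; the flattened offset/normal lists must therefore
# contain width * height * 4 floats to fill the RGBA pixel buffers above.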
class OBJECT_OT_ProcessAnimMeshes(bpy.types.Operator):
"""Store combined per frame vertex offsets and normals for all
    selected mesh objects into separate image textures"""
bl_idname = "object.process_anim_meshes"
bl_label = "Process Anim Meshes"
@property
def allowed_modifiers(self):
return [
'ARMATURE', 'CAST', 'CURVE', 'DISPLACE', 'HOOK',
'LAPLACIANDEFORM', 'LATTICE', 'MESH_DEFORM',
'SHRINKWRAP', 'SIMPLE_DEFORM', 'SMOOTH',
'CORRECTIVE_SMOOTH', 'LAPLACIANSMOOTH',
'SURFACE_DEFORM', 'WARP', 'WAVE',
]
@classmethod
def poll(cls, context):
ob = context.active_object
return ob and ob.type == 'MESH' and ob.mode == 'OBJECT'
def execute(self, context):
units = context.scene.unit_settings
data = bpy.data
objects = [ob for ob in context.selected_objects if ob.type == 'MESH']
vertex_count = sum([len(ob.data.vertices) for ob in objects])
frame_count = len(frame_range(context.scene))
for ob in objects:
for mod in ob.modifiers:
if mod.type not in self.allowed_modifiers:
self.report(
{'ERROR'},
f"Objects with {mod.type.title()} modifiers are not allowed!"
)
return {'CANCELLED'}
if units.system != 'METRIC' or round(units.scale_length, 2) != 0.01:
self.report(
{'ERROR'},
"Scene Unit must be Metric with a Unit Scale of 0.01!"
)
return {'CANCELLED'}
if vertex_count > 8192:
self.report(
{'ERROR'},
f"Vertex count of {vertex_count :,}, execedes limit of 8,192!"
)
return {'CANCELLED'}
if frame_count > 8192:
self.report(
{'ERROR'},
f"Frame count of {frame_count :,}, execedes limit of 8,192!"
)
return {'CANCELLED'}
meshes = get_per_frame_mesh_data(context, data, objects)
export_mesh_data = meshes[0].copy()
create_export_mesh_object(context, data, export_mesh_data)
offsets, normals = get_vertex_data(data, meshes)
texture_size = vertex_count, frame_count
bake_vertex_data(context, data, offsets, normals, texture_size)
return {'FINISHED'}
class VIEW3D_PT_VertexAnimation(bpy.types.Panel):
"""Creates a Panel in 3D Viewport"""
bl_label = "Vertex Animation"
bl_idname = "VIEW3D_PT_vertex_animation"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = "Unreal Tools"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
scene = context.scene
col = layout.column(align=True)
col.prop(scene, "frame_start", text="Frame Start")
col.prop(scene, "frame_end", text="End")
col.prop(scene, "frame_step", text="Step")
row = layout.row()
row.operator("object.process_anim_meshes")
def register():
bpy.utils.register_class(OBJECT_OT_ProcessAnimMeshes)
bpy.utils.register_class(VIEW3D_PT_VertexAnimation)
def unregister():
bpy.utils.unregister_class(OBJECT_OT_ProcessAnimMeshes)
bpy.utils.unregister_class(VIEW3D_PT_VertexAnimation)
if __name__ == "__main__":
register()
|
crobby/sahara
|
refs/heads/master
|
sahara/plugins/vanilla/edp_engine.py
|
15
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins import exceptions as ex
from sahara.plugins import utils as u
from sahara.plugins.vanilla import utils as vu
from sahara.service.edp.oozie import engine as edp_engine
class EdpOozieEngine(edp_engine.OozieJobEngine):
def get_hdfs_user(self):
return 'hadoop'
def get_name_node_uri(self, cluster):
return cluster['info']['HDFS']['NameNode']
def get_oozie_server_uri(self, cluster):
return cluster['info']['JobFlow']['Oozie'] + "/oozie/"
def get_oozie_server(self, cluster):
return vu.get_oozie(cluster)
def validate_job_execution(self, cluster, job, data):
oo_count = u.get_instances_count(cluster, 'oozie')
if oo_count != 1:
raise ex.InvalidComponentCountException('oozie', '1', oo_count)
super(EdpOozieEngine, self).validate_job_execution(cluster, job, data)
|
glwu/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/web/soap.py
|
54
|
# -*- test-case-name: twisted.web.test.test_soap -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
SOAP support for twisted.web.
Requires SOAPpy 0.10.1 or later.
Maintainer: Itamar Shtull-Trauring
Future plans:
SOAPContext support of some kind.
Pluggable method lookup policies.
"""
# SOAPpy
import SOAPpy
# twisted imports
from twisted.web import server, resource, client
from twisted.internet import defer
class SOAPPublisher(resource.Resource):
"""Publish SOAP methods.
By default, publish methods beginning with 'soap_'. If the method
    has an attribute 'useKeywords', it will get the arguments passed
as keyword args.
"""
isLeaf = 1
# override to change the encoding used for responses
encoding = "UTF-8"
def lookupFunction(self, functionName):
"""Lookup published SOAP function.
Override in subclasses. Default behaviour - publish methods
starting with soap_.
@return: callable or None if not found.
"""
return getattr(self, "soap_%s" % functionName, None)
def render(self, request):
"""Handle a SOAP command."""
data = request.content.read()
p, header, body, attrs = SOAPpy.parseSOAPRPC(data, 1, 1, 1)
methodName, args, kwargs, ns = p._name, p._aslist, p._asdict, p._ns
# deal with changes in SOAPpy 0.11
if callable(args):
args = args()
if callable(kwargs):
kwargs = kwargs()
function = self.lookupFunction(methodName)
if not function:
self._methodNotFound(request, methodName)
return server.NOT_DONE_YET
else:
if hasattr(function, "useKeywords"):
keywords = {}
for k, v in kwargs.items():
keywords[str(k)] = v
d = defer.maybeDeferred(function, **keywords)
else:
d = defer.maybeDeferred(function, *args)
d.addCallback(self._gotResult, request, methodName)
d.addErrback(self._gotError, request, methodName)
return server.NOT_DONE_YET
def _methodNotFound(self, request, methodName):
response = SOAPpy.buildSOAP(SOAPpy.faultType("%s:Client" %
SOAPpy.NS.ENV_T, "Method %s not found" % methodName),
encoding=self.encoding)
self._sendResponse(request, response, status=500)
def _gotResult(self, result, request, methodName):
if not isinstance(result, SOAPpy.voidType):
result = {"Result": result}
response = SOAPpy.buildSOAP(kw={'%sResponse' % methodName: result},
encoding=self.encoding)
self._sendResponse(request, response)
def _gotError(self, failure, request, methodName):
e = failure.value
if isinstance(e, SOAPpy.faultType):
fault = e
else:
fault = SOAPpy.faultType("%s:Server" % SOAPpy.NS.ENV_T,
"Method %s failed." % methodName)
response = SOAPpy.buildSOAP(fault, encoding=self.encoding)
self._sendResponse(request, response, status=500)
def _sendResponse(self, request, response, status=200):
request.setResponseCode(status)
if self.encoding is not None:
mimeType = 'text/xml; charset="%s"' % self.encoding
else:
mimeType = "text/xml"
request.setHeader("Content-type", mimeType)
request.setHeader("Content-length", str(len(response)))
request.write(response)
request.finish()
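# A minimal publishing sketch (hypothetical names, based on the class docstring above:
# methods named soap_* are found by lookupFunction and invoked by render):
# class EchoPublisher(SOAPPublisher):
#     def soap_echo(self, value):
#         return value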
class Proxy:
"""A Proxy for making remote SOAP calls.
Pass the URL of the remote SOAP server to the constructor.
Use proxy.callRemote('foobar', 1, 2) to call remote method
'foobar' with args 1 and 2, proxy.callRemote('foobar', x=1)
will call foobar with named argument 'x'.
"""
# at some point this should have encoding etc. kwargs
def __init__(self, url, namespace=None, header=None):
self.url = url
self.namespace = namespace
self.header = header
def _cbGotResult(self, result):
result = SOAPpy.parseSOAPRPC(result)
if hasattr(result, 'Result'):
return result.Result
elif len(result) == 1:
## SOAPpy 0.11.6 wraps the return results in a containing structure.
            ## This check was added to make Proxy behaviour emulate SOAPProxy, which
## flattens the structure by default.
## This behaviour is OK because even singleton lists are wrapped in
## another singleton structType, which is almost always useless.
return result[0]
else:
return result
def callRemote(self, method, *args, **kwargs):
payload = SOAPpy.buildSOAP(args=args, kw=kwargs, method=method,
header=self.header, namespace=self.namespace)
return client.getPage(self.url, postdata=payload, method="POST",
headers={'content-type': 'text/xml',
'SOAPAction': method}
).addCallback(self._cbGotResult)
|
JFriel/honours_project
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/vendor/html5lib/sanitizer.py
|
805
|
from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs']
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
# Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
# stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
# attributes are parsed, and a restricted set, # specified by
# ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
# attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
# in ALLOWED_PROTOCOLS are allowed.
#
# sanitize_html('<script> do_nasty_stuff() </script>')
    # => &lt;script> do_nasty_stuff() &lt;/script>
# sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
# => <a>Click here for $100</a>
def sanitize_token(self, token):
# accommodate filters which use token_type differently
token_type = token["type"]
if token_type in list(tokenTypes.keys()):
token_type = tokenTypes[token_type]
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]):
if token["name"] in self.allowed_elements:
return self.allowed_token(token, token_type)
else:
return self.disallowed_token(token, token_type)
elif token_type == tokenTypes["Comment"]:
pass
else:
return token
def allowed_token(self, token, token_type):
if "data" in token:
attrs = dict([(name, val) for name, val in
token["data"][::-1]
if name in self.allowed_attributes])
for attr in self.attr_val_is_uri:
if attr not in attrs:
continue
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
unescape(attrs[attr])).lower()
# remove replacement characters from unescaped characters
val_unescaped = val_unescaped.replace("\ufffd", "")
if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
(val_unescaped.split(':')[0] not in
self.allowed_protocols)):
del attrs[attr]
for attr in self.svg_attr_val_allows_ref:
if attr in attrs:
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(attrs[attr]))
if (token["name"] in self.svg_allow_local_href and
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
attrs['xlink:href'])):
del attrs['xlink:href']
if 'style' in attrs:
attrs['style'] = self.sanitize_css(attrs['style'])
token["data"] = [[name, val] for name, val in list(attrs.items())]
return token
def disallowed_token(self, token, token_type):
if token_type == tokenTypes["EndTag"]:
token["data"] = "</%s>" % token["name"]
elif token["data"]:
attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
token["data"] = "<%s%s>" % (token["name"], attrs)
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"] = token["data"][:-1] + "/>"
if token["type"] in list(tokenTypes.keys()):
token["type"] = "Characters"
else:
token["type"] = tokenTypes["Characters"]
del token["name"]
return token
def sanitize_css(self, style):
# disallow urls
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
return ''
clean = []
for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
if not value:
continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
'padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=False, lowercaseAttrName=False, parser=None):
# Change case matching defaults as we only output lowercase html anyway
# This solution doesn't seem ideal...
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
lowercaseElementName, lowercaseAttrName, parser=parser)
def __iter__(self):
for token in HTMLTokenizer.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
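# A minimal usage sketch (assuming the classic html5lib pattern of passing this class as
# the tokenizer; hypothetical wiring, not part of this module):
# import html5lib
# parser = html5lib.HTMLParser(tokenizer=HTMLSanitizer)
# fragment = parser.parseFragment('<a href="javascript:alert(1)">hi</a>')  # href is dropped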
|
cephalization/autokey
|
refs/heads/master
|
src/test/configurationmanagertest.py
|
53
|
import unittest
import lib.configurationmanager as conf
from lib.phrase import *
CONFIG_FILE = "../../config/abbr.ini"
class LegacyImporterTest(unittest.TestCase):
def setUp(self):
self.importer = conf.LegacyImporter()
self.importer.load_config(CONFIG_FILE)
def testGlobalSettings(self):
# Test old global defaults using a phrase that has no custom options defined
# Locate otoh phrase
otohPhrase = None
for phrase in self.importer.phrases:
if phrase.abbreviation == "otoh":
otohPhrase = phrase
break
self.assert_(otohPhrase is not None)
self.assertEqual(otohPhrase.immediate, False)
self.assertEqual(otohPhrase.ignoreCase, False)
self.assertEqual(otohPhrase.matchCase, False)
self.assertEqual(otohPhrase.backspace, True)
self.assertEqual(otohPhrase.omitTrigger, False)
self.assertEqual(otohPhrase.triggerInside, False)
def testPhraseCount(self):
self.assertEqual(len(self.importer.phrases), 23)
def testPhrase(self):
# Locate brb phrase
brbPhrase = None
for phrase in self.importer.phrases:
if phrase.abbreviation == "brb":
brbPhrase = phrase
break
self.assert_(brbPhrase is not None)
self.assertEqual(brbPhrase.phrase, "be right back")
self.assertEqual(brbPhrase.description, "be right back")
self.assertEqual(brbPhrase.mode, PhraseMode.ABBREVIATION)
self.assertEqual(brbPhrase.immediate, True)
|
havard024/prego
|
refs/heads/master
|
venv/lib/python2.7/site-packages/south/tests/brokenapp/migrations/__init__.py
|
12133432
| |
leafclick/intellij-community
|
refs/heads/master
|
python/testData/formatter/fStringFragmentWrappingSplitInsideExpression.py
|
12
|
s = f'aaaaaa{oct(42)}'
|
woltage/ansible
|
refs/heads/devel
|
lib/ansible/executor/task_queue_manager.py
|
5
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import locale
import multiprocessing
import os
import socket
import sys
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.connection_info import ConnectionInformation
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.plugins import callback_loader, strategy_loader
from ansible.template import Templar
from ansible.utils.debug import debug
__all__ = ['TaskQueueManager']
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
manager object with shared datastructures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._display = display
self._options = options
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._callbacks_loaded = False
self._callback_plugins = []
# a special flag to help us exit cleanly
self._terminated = False
# this dictionary is used to keep track of notified handlers
self._notified_handlers = dict()
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
self._final_q = multiprocessing.Queue()
# create the pool of worker threads, based on the number of forks specified
try:
fileno = sys.stdin.fileno()
except ValueError:
fileno = None
self._workers = []
for i in range(self._options.forks):
main_q = multiprocessing.Queue()
rslt_q = multiprocessing.Queue()
prc = WorkerProcess(self, main_q, rslt_q, loader)
prc.start()
self._workers.append((prc, main_q, rslt_q))
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
'''
Clears and initializes the shared notified handlers dict with entries
for each handler in the play, which is an empty array that will contain
inventory hostnames for those hosts triggering the handler.
'''
# Zero the dictionary first by removing any entries there.
# Proxied dicts don't support iteritems, so we have to use keys()
for key in self._notified_handlers.keys():
del self._notified_handlers[key]
# FIXME: there is a block compile helper for this...
handler_list = []
for handler_block in handlers:
for handler in handler_block.block:
handler_list.append(handler)
# then initialize it with the handler names from the handler list
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
# we only allow one callback of type 'stdout' to be loaded, so check
# the name of the current plugin and type to see if we need to skip
# loading this callback plugin
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
if callback_type == 'stdout':
if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
elif C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST:
continue
self._callback_plugins.append(callback_plugin(self._display))
else:
self._callback_plugins.append(callback_plugin())
self._callbacks_loaded = True
def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
def do_prompt(prompt, private):
if sys.stdout.encoding:
msg = prompt.encode(sys.stdout.encoding)
else:
# when piping the output, or at other times when stdout
# may not be the standard file descriptor, the stdout
# encoding may not be set, so default to something sane
msg = prompt.encode(locale.getpreferredencoding())
if private:
return getpass.getpass(msg)
return raw_input(msg)
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
# if result is false and default is not None
if not result and default is not None:
result = default
# FIXME: make this work with vault or whatever this old method was
#if encrypt:
# result = utils.do_encrypt(result, encrypt, salt_size, salt)
# handle utf-8 chars
# FIXME: make this work
#result = to_unicode(result, errors='strict')
return result
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
if play.vars_prompt:
for var in play.vars_prompt:
if 'name' not in var:
raise AnsibleError("'vars_prompt' item is missing 'name:'", obj=play._ds)
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in play.vars:
self.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
connection_info = ConnectionInformation(new_play, self._options, self.passwords)
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_connection_info'):
callback_plugin.set_connection_info(connection_info)
self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(new_play.handlers)
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
iterator = PlayIterator(inventory=self._inventory, play=new_play, connection_info=connection_info, all_vars=all_vars)
# and run the play using the strategy
return strategy.run(iterator, connection_info)
def cleanup(self):
debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._result_prc.terminate()
for (worker_prc, main_q, rslt_q) in self._workers:
rslt_q.close()
main_q.close()
worker_prc.terminate()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_notified_handlers(self):
return self._notified_handlers
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
methods = [
getattr(callback_plugin, method_name, None),
getattr(callback_plugin, 'on_any', None)
]
for method in methods:
if method is not None:
method(*args, **kwargs)
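# A minimal usage sketch (hypothetical wiring; in Ansible proper the PlaybookExecutor
# constructs and drives this class):
# tqm = TaskQueueManager(inventory, variable_manager, loader, display, options, passwords)
# try:
#     result = tqm.run(play)   # returns the strategy plugin's run() result
# finally:
#     tqm.cleanup()            # terminate workers and close the result queue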
|
pedrobaeza/odoo
|
refs/heads/master
|
addons/website_event_sale/controllers/main.py
|
54
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website_event.controllers.main import website_event
from openerp.tools.translate import _
class website_event(website_event):
@http.route(['/event/cart/update'], type='http', auth="public", methods=['POST'], website=True)
def cart_update(self, event_id, **post):
cr, uid, context = request.cr, request.uid, request.context
ticket_obj = request.registry.get('event.event.ticket')
sale = False
for key, value in post.items():
quantity = int(value or "0")
if not quantity:
continue
sale = True
ticket_id = key.split("-")[0] == 'ticket' and int(key.split("-")[1]) or None
ticket = ticket_obj.browse(cr, SUPERUSER_ID, ticket_id, context=context)
request.website.sale_get_order(force_create=1)._cart_update(
product_id=ticket.product_id.id, add_qty=quantity, context=dict(context, event_ticket_id=ticket.id))
if not sale:
return request.redirect("/event/%s" % event_id)
return request.redirect("/shop/checkout")
def _add_event(self, event_name="New Event", context={}, **kwargs):
try:
dummy, res_id = request.registry.get('ir.model.data').get_object_reference(request.cr, request.uid, 'event_sale', 'product_product_event')
context['default_event_ticket_ids'] = [[0,0,{
'name': _('Subscription'),
'product_id': res_id,
'deadline' : False,
'seats_max': 1000,
'price': 0,
}]]
except ValueError:
pass
return super(website_event, self)._add_event(event_name, context, **kwargs)
|
croxis/SpaceDrive
|
refs/heads/master
|
spacedrive/renderpipeline/toolkit/pathtracing_reference/res/tex/convert_cubemap.py
|
1
|
"""
Converts the cubemap to a spherical one
"""
from __future__ import division, print_function
from panda3d.core import *
load_prc_file_data("", "textures-power-2 none")
import direct.directbase.DirectStart
cubemap = loader.load_cube_map("../../../../data/default_cubemap/source/#.jpg")
w, h = 4096, 2048
cshader = Shader.make_compute(Shader.SL_GLSL, """
#version 430
layout (local_size_x = 16, local_size_y = 16, local_size_z = 1) in;
uniform samplerCube SourceTex;
uniform writeonly image2D DestTex;
#define M_PI 3.1415926535897932384626433
#define TWO_PI 6.2831853071795864769252867
// Converts a normalized spherical coordinate (r = 1) to cartesian coordinates
vec3 spherical_to_vector(float theta, float phi) {
float sin_theta = sin(theta);
return normalize(vec3(
sin_theta * cos(phi),
sin_theta * sin(phi),
cos(theta)
));
}
// Fixes the cubemap direction
vec3 fix_cubemap_coord(vec3 coord) {
return normalize(coord.xzy * vec3(1,-1,1));
}
void main() {
ivec2 dimensions = imageSize(DestTex).xy;
ivec2 coord = ivec2(gl_GlobalInvocationID.xy);
float theta = (coord.x + 0.5) / float(dimensions.x) * TWO_PI;
float phi = (dimensions.y - coord.y - 0.5) / float(dimensions.y) * M_PI;
vec3 v = spherical_to_vector(phi, theta);
v = fix_cubemap_coord(v);
vec4 color = texture(SourceTex, v);
imageStore(DestTex, coord, vec4(color));
}
""")
dest_tex = Texture("")
dest_tex.setup_2d_texture(w, h, Texture.T_float, Texture.F_rgba16)
print("Converting to spherical coordinates ..")
np = NodePath("np")
np.set_shader(cshader)
np.set_shader_input("SourceTex", cubemap)
np.set_shader_input("DestTex", dest_tex)
attr = np.get_attrib(ShaderAttrib)
base.graphicsEngine.dispatch_compute((w // 16, h // 16, 1), attr, base.win.get_gsg())
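# Explanatory note: with a 16x16 local work-group size, (w // 16, h // 16, 1) dispatches
# 256 x 128 groups, covering every texel of the 4096x2048 destination exactly once.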
print("Extracting data ..")
base.graphicsEngine.extract_texture_data(dest_tex, base.win.get_gsg())
print("Writing texture ..")
dest_tex.write("envmap.png")
|
direvus/ansible
|
refs/heads/devel
|
lib/ansible/plugins/loader.py
|
10
|
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import imp
import os
import os.path
import sys
import warnings
from collections import defaultdict
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text
from ansible.parsing.utils.yaml import from_yaml
from ansible.plugins import get_plugin_class, MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE
from ansible.utils.plugin_docs import get_docstring
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def get_all_plugin_loaders():
return [(name, obj) for (name, obj) in globals().items() if isinstance(obj, PluginLoader)]
class PluginLoader:
'''
PluginLoader loads plugins from the configured plugin directories.
It searches for plugins by iterating through the combined list of play basedirs, configured
paths, and the python path. The first match is used.
'''
def __init__(self, class_name, package, config, subdir, aliases=None, required_base_class=None):
aliases = {} if aliases is None else aliases
self.class_name = class_name
self.base_class = required_base_class
self.package = package
self.subdir = subdir
# FIXME: remove alias dict in favor of alias by symlink?
self.aliases = aliases
if config and not isinstance(config, list):
config = [config]
elif not config:
config = []
self.config = config
if class_name not in MODULE_CACHE:
MODULE_CACHE[class_name] = {}
if class_name not in PATH_CACHE:
PATH_CACHE[class_name] = None
if class_name not in PLUGIN_PATH_CACHE:
PLUGIN_PATH_CACHE[class_name] = defaultdict(dict)
self._module_cache = MODULE_CACHE[class_name]
self._paths = PATH_CACHE[class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
self._extra_dirs = []
self._searched_paths = set()
def __setstate__(self, data):
'''
Deserializer.
'''
class_name = data.get('class_name')
package = data.get('package')
config = data.get('config')
subdir = data.get('subdir')
aliases = data.get('aliases')
base_class = data.get('base_class')
PATH_CACHE[class_name] = data.get('PATH_CACHE')
PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
self.__init__(class_name, package, config, subdir, aliases, base_class)
self._extra_dirs = data.get('_extra_dirs', [])
self._searched_paths = data.get('_searched_paths', set())
def __getstate__(self):
'''
Serializer.
'''
return dict(
class_name=self.class_name,
base_class=self.base_class,
package=self.package,
config=self.config,
subdir=self.subdir,
aliases=self.aliases,
_extra_dirs=self._extra_dirs,
_searched_paths=self._searched_paths,
PATH_CACHE=PATH_CACHE[self.class_name],
PLUGIN_PATH_CACHE=PLUGIN_PATH_CACHE[self.class_name],
)
def format_paths(self, paths):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in paths:
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def print_paths(self):
return self.format_paths(self._get_paths(subdirs=False))
def _all_directories(self, dir):
results = []
results.append(dir)
for root, subdirs, files in os.walk(dir, followlinks=True):
if '__init__.py' in files:
for x in subdirs:
results.append(os.path.join(root, x))
return results
def _get_package_paths(self, subdirs=True):
''' Gets the path of a Python package '''
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
for parent_mod in parts:
m = getattr(m, parent_mod)
self.package_path = os.path.dirname(m.__file__)
if subdirs:
return self._all_directories(self.package_path)
return [self.package_path]
def _get_paths(self, subdirs=True):
''' Return a list of paths to search for plugins in '''
# FIXME: This is potentially buggy if subdirs is sometimes True and sometimes False.
# In current usage, everything calls this with subdirs=True except for module_utils_loader and ansible-doc
# which always calls it with subdirs=False. So there currently isn't a problem with this caching.
if self._paths is not None:
return self._paths
ret = self._extra_dirs[:]
# look in any configured plugin paths, allow one level deep for subcategories
if self.config is not None:
for path in self.config:
path = os.path.realpath(os.path.expanduser(path))
if subdirs:
contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
for c in contents:
if os.path.isdir(c) and c not in ret:
ret.append(c)
if path not in ret:
ret.append(path)
# look for any plugins installed in the package subtree
# Note package path always gets added last so that every other type of
# path is searched before it.
ret.extend(self._get_package_paths(subdirs=subdirs))
# HACK: because powershell modules are in the same directory
# hierarchy as other modules we have to process them last. This is
# because powershell only works on windows but the other modules work
# anywhere (possibly including windows if the correct language
# interpreter is installed). the non-powershell modules can have any
# file extension and thus powershell modules are picked up in that.
# The non-hack way to fix this is to have powershell modules be
# a different PluginLoader/ModuleLoader. But that requires changing
# other things too (known thing to change would be PATHS_CACHE,
# PLUGIN_PATHS_CACHE, and MODULE_CACHE. Since those three dicts key
# on the class_name and neither regular modules nor powershell modules
# would have class_names, they would not work as written.
reordered_paths = []
win_dirs = []
for path in ret:
if path.endswith('windows'):
win_dirs.append(path)
else:
reordered_paths.append(path)
reordered_paths.extend(win_dirs)
# cache and return the result
self._paths = reordered_paths
return reordered_paths
def _load_config_defs(self, name, path):
''' Reads plugin docs to find configuration setting definitions, to push to config manager for later use '''
# plugins w/o class name don't support config
if self.class_name:
type_name = get_plugin_class(self.class_name)
# if type name != 'module_doc_fragment':
if type_name in C.CONFIGURABLE_PLUGINS:
dstring = get_docstring(path, fragment_loader, verbose=False, ignore_errors=True)[0]
if dstring and 'options' in dstring and isinstance(dstring['options'], dict):
C.config.initialize_plugin_configuration_definitions(type_name, name, dstring['options'])
display.debug('Loaded config def from plugin (%s/%s)' % (type_name, name))
def add_directory(self, directory, with_subdir=False):
''' Adds an additional directory to the search path '''
directory = os.path.realpath(directory)
if directory is not None:
if with_subdir:
directory = os.path.join(directory, self.subdir)
if directory not in self._extra_dirs:
# append the directory and invalidate the path cache
self._extra_dirs.append(directory)
self._paths = None
display.debug('Added %s to loader search path' % (directory))
def _find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False):
''' Find a plugin named name '''
global _PLUGIN_FILTERS
if name in _PLUGIN_FILTERS[self.package]:
return None
if mod_type:
suffix = mod_type
elif self.class_name:
# Ansible plugins that run in the controller process (most plugins)
suffix = '.py'
else:
# Only Ansible Modules. Ansible modules can be any executable so
# they can have any suffix
suffix = ''
if check_aliases:
name = self.aliases.get(name, name)
# The particular cache to look for modules within. This matches the
# requested mod_type
pull_cache = self._plugin_path_cache[suffix]
try:
return pull_cache[name]
except KeyError:
# Cache miss. Now let's find the plugin
pass
# TODO: Instead of using the self._paths cache (PATH_CACHE) and
# self._searched_paths we could use an iterator. Before enabling that
# we need to make sure we don't want to add additional directories
# (add_directory()) once we start using the iterator. Currently, it
# looks like _get_paths() never forces a cache refresh so if we expect
# additional directories to be added later, it is buggy.
for path in (p for p in self._get_paths() if p not in self._searched_paths and os.path.isdir(p)):
try:
full_paths = (os.path.join(path, f) for f in os.listdir(path))
except OSError as e:
display.warning("Error accessing plugin paths: %s" % to_text(e))
for full_path in (f for f in full_paths if os.path.isfile(f) and not f.endswith('__init__.py')):
full_name = os.path.basename(full_path)
# HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
# FIXME: I believe this is only correct for modules and module_utils.
# For all other plugins we want .pyc and .pyo should be valid
if full_path.endswith(('.pyc', '.pyo')):
continue
splitname = os.path.splitext(full_name)
base_name = splitname[0]
try:
extension = splitname[1]
except IndexError:
extension = ''
# Module found, now enter it into the caches that match this file
if base_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][base_name] = full_path
if full_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][full_name] = full_path
if base_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][base_name] = full_path
if full_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][full_name] = full_path
self._searched_paths.add(path)
try:
return pull_cache[name]
except KeyError:
# Didn't find the plugin in this directory. Load modules from the next one
pass
# if nothing is found, try finding alias/deprecated
if not name.startswith('_'):
alias_name = '_' + name
# We've already cached all the paths at this point
if alias_name in pull_cache:
if not ignore_deprecated and not os.path.islink(pull_cache[alias_name]):
# FIXME: this is not always the case, some are just aliases
display.deprecated('%s is kept for backwards compatibility but usage is discouraged. ' # pylint: disable=ansible-deprecated-no-version
'The module documentation details page may explain more about this rationale.' % name.lstrip('_'))
return pull_cache[alias_name]
return None
def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False):
''' Find a plugin named name '''
# Import here to avoid circular import
from ansible.vars.reserved import is_reserved_name
plugin = self._find_plugin(name, mod_type=mod_type, ignore_deprecated=ignore_deprecated, check_aliases=check_aliases)
if plugin and self.package == 'ansible.modules' and is_reserved_name(name):
raise AnsibleError(
'Module "%s" shadows the name of a reserved keyword. Please rename or remove this module. Found at %s' % (name, plugin)
)
return plugin
def has_plugin(self, name):
''' Checks if a plugin named name exists '''
return self.find_plugin(name) is not None
__contains__ = has_plugin
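    # A minimal lookup sketch (hypothetical; real loaders are module-level instances of this
    # class configured elsewhere in ansible.plugins.loader):
    # path = some_loader.find_plugin('ping')          # filesystem path or None
    # if 'ping' in some_loader:                        # __contains__ -> has_plugin()
    #     plugin_class = some_loader.get('ping', class_only=True)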
def _load_module_source(self, name, path):
# avoid collisions across plugins
full_name = '.'.join([self.package, name])
if full_name in sys.modules:
# Avoids double loading, See https://github.com/ansible/ansible/issues/13110
return sys.modules[full_name]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
with open(path, 'rb') as module_file:
module = imp.load_source(full_name, path, module_file)
return module
def _update_object(self, obj, name, path):
# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
setattr(obj, '_load_name', name)
def get(self, name, *args, **kwargs):
''' instantiates a plugin of the given name using arguments '''
found_in_cache = True
class_only = kwargs.pop('class_only', False)
if name in self.aliases:
name = self.aliases[name]
path = self.find_plugin(name)
if path is None:
return None
if path not in self._module_cache:
self._module_cache[path] = self._load_module_source(name, path)
found_in_cache = False
obj = getattr(self._module_cache[path], self.class_name)
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
return None
if not issubclass(obj, plugin_class):
return None
self._display_plugin_load(self.class_name, name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
if not class_only:
try:
obj = obj(*args, **kwargs)
except TypeError as e:
if "abstract" in e.args[0]:
# Abstract Base Class. The found plugin file does not
# fully implement the defined interface.
return None
raise
# load plugin config data
if not found_in_cache:
self._load_config_defs(name, path)
self._update_object(obj, name, path)
return obj
def _display_plugin_load(self, class_name, name, searched_paths, path, found_in_cache=None, class_only=None):
msg = 'Loading %s \'%s\' from %s' % (class_name, os.path.basename(name), path)
if len(searched_paths) > 1:
msg = '%s (searched paths: %s)' % (msg, self.format_paths(searched_paths))
if found_in_cache or class_only:
msg = '%s (found_in_cache=%s, class_only=%s)' % (msg, found_in_cache, class_only)
display.debug(msg)
def all(self, *args, **kwargs):
'''
Iterate through all plugins of this type
A plugin loader is initialized with a specific type. This function is an iterator returning
all of the plugins of that type to the caller.
:kwarg path_only: If this is set to True, then we return the paths to where the plugins reside
instead of an instance of the plugin. This conflicts with class_only and both should
not be set.
:kwarg class_only: If this is set to True then we return the python class which implements
a plugin rather than an instance of the plugin. This conflicts with path_only and both
should not be set.
:kwarg _dedupe: By default, we only return one plugin per plugin name. Deduplication happens
in the same way as the :meth:`get` and :meth:`find_plugin` methods resolve which plugin
should take precedence. If this is set to False, then we return all of the plugins
found, including those with duplicate names. In the case of duplicates, the order in
which they are returned is the one that would take precedence first, followed by the
others in decreasing precedence order. This should only be used by subclasses which
want to manage their own deduplication of the plugins.
:*args: Any extra arguments are passed to each plugin when it is instantiated.
:**kwargs: Any extra keyword arguments are passed to each plugin when it is instantiated.
'''
# TODO: Change the signature of this method to:
# def all(return_type='instance', args=None, kwargs=None):
# if args is None: args = []
# if kwargs is None: kwargs = {}
# return_type can be instance, class, or path.
# These changes will mean that plugin parameters won't conflict with our params and
# will also make it impossible to request both a path and a class at the same time.
#
# Move _dedupe to be a class attribute, CUSTOM_DEDUPE, with subclasses for filters and
# tests setting it to True
global _PLUGIN_FILTERS
dedupe = kwargs.pop('_dedupe', True)
path_only = kwargs.pop('path_only', False)
class_only = kwargs.pop('class_only', False)
# Having both path_only and class_only is a coding bug
if path_only and class_only:
raise AnsibleError('Do not set both path_only and class_only when calling PluginLoader.all()')
all_matches = []
found_in_cache = True
for i in self._get_paths():
all_matches.extend(glob.glob(os.path.join(i, "*.py")))
loaded_modules = set()
for path in sorted(all_matches, key=os.path.basename):
name = os.path.splitext(path)[0]
basename = os.path.basename(name)
if basename == '__init__' or basename in _PLUGIN_FILTERS[self.package]:
continue
if dedupe and basename in loaded_modules:
continue
loaded_modules.add(basename)
if path_only:
yield path
continue
if path not in self._module_cache:
try:
module = self._load_module_source(name, path)
                except Exception as e:
                    display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
                    continue
                self._module_cache[path] = module
found_in_cache = False
try:
obj = getattr(self._module_cache[path], self.class_name)
except AttributeError as e:
display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
continue
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
continue
if not issubclass(obj, plugin_class):
continue
self._display_plugin_load(self.class_name, basename, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
if not class_only:
try:
obj = obj(*args, **kwargs)
                except TypeError as e:
                    display.warning("Skipping plugin (%s) as it seems to be incomplete: %s" % (path, to_text(e)))
                    continue
# load plugin config data
if not found_in_cache:
self._load_config_defs(basename, path)
self._update_object(obj, basename, path)
yield obj
class Jinja2Loader(PluginLoader):
"""
PluginLoader optimized for Jinja2 plugins
The filter and test plugins are Jinja2 plugins encapsulated inside of our plugin format.
    The way the calling code is set up, we need to do a few things differently in the all() method
"""
    def find_plugin(self, name):
        # Nothing using Jinja2Loader uses this method. We can't use the base class version because
        # we deduplicate differently than the base class
        raise AnsibleError('No code should call find_plugin for Jinja2Loaders (Not implemented)')
    def get(self, name, *args, **kwargs):
        # Nothing using Jinja2Loader uses this method. We can't use the base class version because
        # we deduplicate differently than the base class
        raise AnsibleError('No code should call get for Jinja2Loaders (Not implemented)')
def all(self, *args, **kwargs):
"""
Differences with :meth:`PluginLoader.all`:
* We do not deduplicate ansible plugin names. This is because we don't care about our
plugin names, here. We care about the names of the actual jinja2 plugins which are inside
of our plugins.
* We reverse the order of the list of plugins compared to other PluginLoaders. This is
because of how calling code chooses to sync the plugins from the list. It adds all the
Jinja2 plugins from one of our Ansible plugins into a dict. Then it adds the Jinja2
plugins from the next Ansible plugin, overwriting any Jinja2 plugins that had the same
name. This is an encapsulation violation (the PluginLoader should not know about what
calling code does with the data) but we're pushing the common code here. We'll fix
this in the future by moving more of the common code into this PluginLoader.
* We return a list. We could iterate the list instead but that's extra work for no gain because
the API receiving this doesn't care. It just needs an iterable
"""
# We don't deduplicate ansible plugin names. Instead, calling code deduplicates jinja2
# plugin names.
kwargs['_dedupe'] = False
# We have to instantiate a list of all plugins so that we can reverse it. We reverse it so
# that calling code will deduplicate this correctly.
plugins = [p for p in super(Jinja2Loader, self).all(*args, **kwargs)]
plugins.reverse()
return plugins
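# Illustrative sketch (not part of the original file): how calling code (for example the
# templating layer) can consume Jinja2Loader.all(). Because the list comes back in reverse
# precedence order, a plain dict.update() per plugin lets the highest-precedence Jinja2
# plugin win by being applied last. plugin.filters() is the conventional FilterModule hook;
# the loop below is only a sketch, the real consumer lives elsewhere in Ansible.
#
#   filters = {}
#   for plugin in filter_loader.all():   # filter_loader is defined further down this file
#       filters.update(plugin.filters())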
def _load_plugin_filter():
filters = defaultdict(frozenset)
if C.PLUGIN_FILTERS_CFG is None:
filter_cfg = '/etc/ansible/plugin_filters.yml'
user_set = False
else:
filter_cfg = C.PLUGIN_FILTERS_CFG
user_set = True
if os.path.exists(filter_cfg):
with open(filter_cfg, 'rb') as f:
try:
filter_data = from_yaml(f.read())
except Exception as e:
display.warning(u'The plugin filter file, {0} was not parsable.'
u' Skipping: {1}'.format(filter_cfg, to_text(e)))
return filters
try:
version = filter_data['filter_version']
except KeyError:
display.warning(u'The plugin filter file, {0} was invalid.'
u' Skipping.'.format(filter_cfg))
return filters
# Try to convert for people specifying version as a float instead of string
version = to_text(version)
version = version.strip()
if version == u'1.0':
# Modules and action plugins share the same blacklist since the difference between the
# two isn't visible to the users
filters['ansible.modules'] = frozenset(filter_data['module_blacklist'])
filters['ansible.plugins.action'] = filters['ansible.modules']
else:
            display.warning(u'The plugin filter file, {0} was a version not recognized by this'
                            u' version of Ansible. Skipping.'.format(filter_cfg))
else:
if user_set:
display.warning(u'The plugin filter file, {0} does not exist.'
u' Skipping.'.format(filter_cfg))
# Specialcase the stat module as Ansible can run very few things if stat is blacklisted.
if 'stat' in filters['ansible.modules']:
raise AnsibleError('The stat module was specified in the module blacklist file, {0}, but'
' Ansible will not function without the stat module. Please remove stat'
' from the blacklist.'.format(filter_cfg))
return filters
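# Illustrative sketch (not part of the original file): the shape of a version 1.0 filter
# file that _load_plugin_filter() above accepts. The blacklisted module names are placeholders.
#
#   # /etc/ansible/plugin_filters.yml
#   filter_version: '1.0'
#   module_blacklist:
#     - ec2_facts
#     - docker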
# TODO: All of the following is initialization code It should be moved inside of an initialization
# function which is called at some point early in the ansible and ansible-playbook CLI startup.
_PLUGIN_FILTERS = _load_plugin_filter()
# doc fragments first
fragment_loader = PluginLoader(
'ModuleDocFragment',
'ansible.utils.module_docs_fragments',
os.path.join(os.path.dirname(__file__), 'module_docs_fragments'),
'',
)
action_loader = PluginLoader(
'ActionModule',
'ansible.plugins.action',
C.DEFAULT_ACTION_PLUGIN_PATH,
'action_plugins',
required_base_class='ActionBase',
)
cache_loader = PluginLoader(
'CacheModule',
'ansible.plugins.cache',
C.DEFAULT_CACHE_PLUGIN_PATH,
'cache_plugins',
)
callback_loader = PluginLoader(
'CallbackModule',
'ansible.plugins.callback',
C.DEFAULT_CALLBACK_PLUGIN_PATH,
'callback_plugins',
)
connection_loader = PluginLoader(
'Connection',
'ansible.plugins.connection',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
aliases={'paramiko': 'paramiko_ssh'},
required_base_class='ConnectionBase',
)
shell_loader = PluginLoader(
'ShellModule',
'ansible.plugins.shell',
'shell_plugins',
'shell_plugins',
)
module_loader = PluginLoader(
'',
'ansible.modules',
C.DEFAULT_MODULE_PATH,
'library',
)
module_utils_loader = PluginLoader(
'',
'ansible.module_utils',
C.DEFAULT_MODULE_UTILS_PATH,
'module_utils',
)
# NB: dedicated loader is currently necessary because PS module_utils expects "with subdir" lookup where
# regular module_utils doesn't. This can be revisited once we have more granular loaders.
ps_module_utils_loader = PluginLoader(
'',
'ansible.module_utils',
C.DEFAULT_MODULE_UTILS_PATH,
'module_utils',
)
lookup_loader = PluginLoader(
'LookupModule',
'ansible.plugins.lookup',
C.DEFAULT_LOOKUP_PLUGIN_PATH,
'lookup_plugins',
required_base_class='LookupBase',
)
filter_loader = Jinja2Loader(
'FilterModule',
'ansible.plugins.filter',
C.DEFAULT_FILTER_PLUGIN_PATH,
'filter_plugins',
)
test_loader = Jinja2Loader(
'TestModule',
'ansible.plugins.test',
C.DEFAULT_TEST_PLUGIN_PATH,
'test_plugins'
)
strategy_loader = PluginLoader(
'StrategyModule',
'ansible.plugins.strategy',
C.DEFAULT_STRATEGY_PLUGIN_PATH,
'strategy_plugins',
required_base_class='StrategyBase',
)
terminal_loader = PluginLoader(
'TerminalModule',
'ansible.plugins.terminal',
C.DEFAULT_TERMINAL_PLUGIN_PATH,
'terminal_plugins',
required_base_class='TerminalBase'
)
vars_loader = PluginLoader(
'VarsModule',
'ansible.plugins.vars',
C.DEFAULT_VARS_PLUGIN_PATH,
'vars_plugins',
)
cliconf_loader = PluginLoader(
'Cliconf',
'ansible.plugins.cliconf',
C.DEFAULT_CLICONF_PLUGIN_PATH,
'cliconf_plugins',
required_base_class='CliconfBase'
)
netconf_loader = PluginLoader(
'Netconf',
'ansible.plugins.netconf',
C.DEFAULT_NETCONF_PLUGIN_PATH,
'netconf_plugins',
required_base_class='NetconfBase'
)
inventory_loader = PluginLoader(
'InventoryModule',
'ansible.plugins.inventory',
C.DEFAULT_INVENTORY_PLUGIN_PATH,
'inventory_plugins'
)
httpapi_loader = PluginLoader(
'HttpApi',
'ansible.plugins.httpapi',
C.DEFAULT_HTTPAPI_PLUGIN_PATH,
'httpapi_plugins',
)
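# Illustrative sketch (not part of the original file): typical ways the loader instances
# above are consumed elsewhere in Ansible. The plugin names are placeholders, and the
# constructor arguments passed to get() depend on the plugin type, so they are elided.
#
#   module_loader.find_plugin('ping')          # -> path to the module file, or None
#   callback_loader.all(class_only=True)       # iterate callback plugin classes
#   strategy_loader.get('linear', ...)         # instantiate a strategy plugin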
|
defionscode/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/fortimanager/fortimanager.py
|
64
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Fortinet, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# check for pyFMG lib
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
class AnsibleFortiManager(object):
def __init__(self, module, ip=None, username=None, passwd=None, use_ssl=True, verify_ssl=False, timeout=300):
self.ip = ip
self.username = username
self.passwd = passwd
self.use_ssl = use_ssl
self.verify_ssl = verify_ssl
self.timeout = timeout
self.fmgr_instance = None
if not HAS_PYFMGR:
module.fail_json(msg='Could not import the python library pyFMG required by this module')
self.module = module
def login(self):
if self.ip is not None:
self.fmgr_instance = FortiManager(self.ip, self.username, self.passwd, use_ssl=self.use_ssl,
verify_ssl=self.verify_ssl, timeout=self.timeout, debug=False,
disable_request_warnings=True)
return self.fmgr_instance.login()
def logout(self):
if self.fmgr_instance.sid is not None:
self.fmgr_instance.logout()
def get(self, url, data):
return self.fmgr_instance.get(url, **data)
def set(self, url, data):
return self.fmgr_instance.set(url, **data)
def update(self, url, data):
return self.fmgr_instance.update(url, **data)
def delete(self, url, data):
return self.fmgr_instance.delete(url, **data)
def add(self, url, data):
return self.fmgr_instance.add(url, **data)
def execute(self, url, data):
return self.fmgr_instance.execute(url, **data)
def move(self, url, data):
return self.fmgr_instance.move(url, **data)
def clone(self, url, data):
return self.fmgr_instance.clone(url, **data)
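# Illustrative sketch (not part of the original file): how a module might drive the wrapper
# above. The address, credentials, URL and payload are placeholders; a real module would take
# them from its own parameters. This helper is not called anywhere in this file.
def _example_fortimanager_usage(module):
    fmgr = AnsibleFortiManager(module, ip='192.0.2.10', username='admin', passwd='secret')
    fmgr.login()
    try:
        # keys of the data dict are passed through as keyword arguments to pyFMG
        result = fmgr.get('/dvmdb/adom', {'fields': ['name']})
    finally:
        fmgr.logout()
    return result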
|
indictranstech/ebuy-now-frappe
|
refs/heads/develop
|
frappe/defaults.py
|
4
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.desk.notifications import clear_notifications
# Note: DefaultValue records are identified by parenttype
# __default, __global or 'User Permission'
common_keys = ["__default", "__global"]
def set_user_default(key, value, user=None, parenttype=None):
set_default(key, value, user or frappe.session.user, parenttype)
def add_user_default(key, value, user=None, parenttype=None):
add_default(key, value, user or frappe.session.user, parenttype)
def get_user_default(key, user=None):
user_defaults = get_defaults(user or frappe.session.user)
d = user_defaults.get(key, None)
if is_a_user_permission_key(key):
if d and isinstance(d, (list, tuple)) and len(d)==1:
			# Use User Permission value only when it has a single value
d = d[0]
else:
d = user_defaults.get(frappe.scrub(key), None)
return isinstance(d, (list, tuple)) and d[0] or d
def get_user_default_as_list(key, user=None):
user_defaults = get_defaults(user or frappe.session.user)
d = user_defaults.get(key, None)
if is_a_user_permission_key(key):
if d and isinstance(d, (list, tuple)) and len(d)==1:
			# Use User Permission value only when it has a single value
d = [d[0]]
else:
d = user_defaults.get(frappe.scrub(key), None)
return (not isinstance(d, (list, tuple))) and [d] or d
def is_a_user_permission_key(key):
return ":" not in key and key != frappe.scrub(key)
def get_user_permissions(user=None):
if not user:
user = frappe.session.user
return build_user_permissions(user)
def build_user_permissions(user):
out = frappe.cache().hget("user_permissions", user)
	if out is None:
out = {}
for key, value in frappe.db.sql("""select defkey, ifnull(defvalue, '') as defvalue
from tabDefaultValue where parent=%s and parenttype='User Permission'""", (user,)):
out.setdefault(key, []).append(value)
# add profile match
if user not in out.get("User", []):
out.setdefault("User", []).append(user)
frappe.cache().hset("user_permissions", user, out)
return out
def get_defaults(user=None):
globald = get_defaults_for()
if not user:
user = frappe.session.user if frappe.session else "Guest"
if user:
userd = get_defaults_for(user)
userd.update({"user": user, "owner": user})
globald.update(userd)
return globald
def clear_user_default(key, user=None):
clear_default(key, parent=user or frappe.session.user)
# Global
def set_global_default(key, value):
set_default(key, value, "__default")
def add_global_default(key, value):
add_default(key, value, "__default")
def get_global_default(key):
d = get_defaults().get(key, None)
return isinstance(d, list) and d[0] or d
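# Illustrative sketch (not part of the original file): a typical round trip through the
# helpers above, assuming a connected frappe site. Keys and values are only examples.
#
#   set_global_default("fiscal_year", "2015-2016")
#   get_global_default("fiscal_year")                        # -> "2015-2016"
#   add_user_default("company", "Acme", user="test@example.com")
#   get_user_default("company", user="test@example.com")     # -> "Acme"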
# Common
def set_default(key, value, parent, parenttype="__default"):
"""Override or add a default value.
Adds default value in table `tabDefaultValue`.
:param key: Default key.
:param value: Default value.
:param parent: Usually, **User** to whom the default belongs.
:param parenttype: [optional] default is `__default`."""
frappe.db.sql("""delete from `tabDefaultValue` where defkey=%s and parent=%s""", (key, parent))
add_default(key, value, parent)
def add_default(key, value, parent, parenttype=None):
d = frappe.get_doc({
"doctype": "DefaultValue",
"parent": parent,
"parenttype": parenttype or "__default",
"parentfield": "system_defaults",
"defkey": key,
"defvalue": value
})
d.insert(ignore_permissions=True)
_clear_cache(parent)
def clear_default(key=None, value=None, parent=None, name=None, parenttype=None):
"""Clear a default value by any of the given parameters and delete caches.
:param key: Default key.
:param value: Default value.
:param parent: User name, or `__global`, `__default`.
:param name: Default ID.
:param parenttype: Clear defaults table for a particular type e.g. **User**.
"""
conditions = []
values = []
if name:
conditions.append("name=%s")
values.append(name)
else:
if key:
conditions.append("defkey=%s")
values.append(key)
if value:
conditions.append("defvalue=%s")
values.append(value)
if parent:
conditions.append("parent=%s")
values.append(parent)
if parenttype:
conditions.append("parenttype=%s")
values.append(parenttype)
if parent:
clear_cache(parent)
else:
clear_cache("__default")
clear_cache("__global")
if not conditions:
		raise Exception("[clear_default] No key specified.")
frappe.db.sql("""delete from tabDefaultValue where {0}""".format(" and ".join(conditions)),
tuple(values))
_clear_cache(parent)
def get_defaults_for(parent="__default"):
"""get all defaults"""
defaults = frappe.cache().hget("defaults", parent)
	if defaults is None:
# sort descending because first default must get precedence
res = frappe.db.sql("""select defkey, defvalue from `tabDefaultValue`
where parent = %s order by creation""", (parent,), as_dict=1)
defaults = frappe._dict({})
for d in res:
if d.defkey in defaults:
# listify
if not isinstance(defaults[d.defkey], list) and defaults[d.defkey] != d.defvalue:
defaults[d.defkey] = [defaults[d.defkey]]
if d.defvalue not in defaults[d.defkey]:
defaults[d.defkey].append(d.defvalue)
elif d.defvalue is not None:
defaults[d.defkey] = d.defvalue
frappe.cache().hset("defaults", parent, defaults)
return defaults
def _clear_cache(parent):
if parent in common_keys:
frappe.clear_cache()
else:
clear_notifications(user=parent)
frappe.clear_cache(user=parent)
def clear_cache(user=None):
if user:
for p in ([user] + common_keys):
frappe.cache().hdel("defaults", p)
elif frappe.flags.in_install!="frappe":
frappe.cache().delete_key("defaults")
|
landryb/QGIS
|
refs/heads/master
|
python/plugins/processing/gui/AlgorithmExecutor.py
|
11
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
AlgorithmExecutor.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
from PyQt4.QtCore import QSettings, QCoreApplication
from qgis.core import QgsFeature, QgsVectorFileWriter
from processing.core.ProcessingLog import ProcessingLog
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.gui.Postprocessing import handleAlgorithmResults
from processing.tools import dataobjects
from processing.tools.system import getTempFilename
from processing.tools import vector
from processing.gui.SilentProgress import SilentProgress
def runalg(alg, progress=None):
"""Executes a given algorithm, showing its progress in the
progress object passed along.
Return true if everything went OK, false if the algorithm
could not be completed.
"""
if progress is None:
progress = SilentProgress()
try:
alg.execute(progress)
return True
except GeoAlgorithmExecutionException as e:
ProcessingLog.addToLog(sys.exc_info()[0], ProcessingLog.LOG_ERROR)
progress.error(e.msg)
return False
def runalgIterating(alg, paramToIter, progress):
# Generate all single-feature layers
settings = QSettings()
systemEncoding = settings.value('/UI/encoding', 'System')
layerfile = alg.getParameterValue(paramToIter)
layer = dataobjects.getObjectFromUri(layerfile, False)
feat = QgsFeature()
filelist = []
outputs = {}
provider = layer.dataProvider()
features = vector.features(layer)
for feat in features:
output = getTempFilename('shp')
filelist.append(output)
writer = QgsVectorFileWriter(output, systemEncoding,
provider.fields(), provider.geometryType(), layer.crs())
writer.addFeature(feat)
del writer
# store output values to use them later as basenames for all outputs
for out in alg.outputs:
outputs[out.name] = out.value
# now run all the algorithms
for i, f in enumerate(filelist):
alg.setParameterValue(paramToIter, f)
for out in alg.outputs:
filename = outputs[out.name]
if filename:
filename = filename[:filename.rfind('.')] + '_' + unicode(i) \
+ filename[filename.rfind('.'):]
out.value = filename
progress.setText(tr('Executing iteration %s/%s...' % (unicode(i), unicode(len(filelist)))))
progress.setPercentage(i * 100 / len(filelist))
if runalg(alg):
handleAlgorithmResults(alg, None, False)
else:
return False
return True
def tr(string, context=''):
if context == '':
context = 'AlgorithmExecutor'
return QCoreApplication.translate(context, string)
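# Illustrative sketch (not part of the original file): how callers typically combine the
# helpers above. Obtaining the algorithm instance is shown only loosely; in this QGIS 2.x
# codebase it would come from the Processing registry, and the algorithm name and parameter
# are placeholders.
#
#   alg = Processing.getAlgorithm('qgis:somealgorithm').getCopy()
#   alg.setParameterValue('INPUT', layer_path)
#   if runalg(alg, progress=SilentProgress()):
#       handleAlgorithmResults(alg, None, False)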
|
100star/h2o
|
refs/heads/master
|
py/testdir_single_jvm/rf_simple_example.py
|
9
|
import sys
import json
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_args
#
# This is intended to be the simplest possible RF example.
# Look at sandbox/commands.log for REST API requests to H2O.
#
print "--------------------------------------------------------------------------------"
print "BUILDING CLOUD"
print "--------------------------------------------------------------------------------"
h2o_args.parse_our_args()
h2o.init(node_count=2, java_heap_GB=2)
print "--------------------------------------------------------------------------------"
print "PARSING DATASET"
print "--------------------------------------------------------------------------------"
#
# What this really ends up doing is a REST API PostFile.json request.
#
csvPathname = 'iris/iris2.csv'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
print "--------------------------------------------------------------------------------"
print "RUNNING RF"
print "--------------------------------------------------------------------------------"
#
# For valid kwargs, look at h2o.py random_forest() params_dict variable.
timeoutSecs = 20
if (h2o.beta_features):
kwargs = {"ntrees": 6}
else:
kwargs = {"ntree": 6}
rf_json_response = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print json.dumps(rf_json_response, indent=4)
print "--------------------------------------------------------------------------------"
print "SHUTTING DOWN"
print "--------------------------------------------------------------------------------"
h2o.check_sandbox_for_errors()
h2o.tear_down_cloud()
|
kevinmel2000/sl4a
|
refs/heads/master
|
python/src/Lib/test/test_support.py
|
48
|
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test.test_support':
raise ImportError('test_support must be imported from the test package')
import contextlib
import errno
import socket
import sys
import os
import shutil
import warnings
import unittest
__all__ = ["Error", "TestFailed", "TestSkipped", "ResourceDenied", "import_module",
"verbose", "use_resources", "max_memuse", "record_original_stdout",
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
"is_resource_enabled", "requires", "find_unused_port", "bind_port",
"fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
"findfile", "verify", "vereq", "sortdict", "check_syntax_error",
"open_urlresource", "check_warnings", "CleanImport",
"EnvironmentVarGuard", "captured_output",
"captured_stdout", "TransientResource", "transient_internet",
"run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
"BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
"threading_cleanup", "reap_children"]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class TestSkipped(Error):
"""Test skipped.
This can be raised to indicate that a test was deliberatly
skipped, but not because a feature wasn't available. For
example, if some resource can't be used, such as the network
appears to be unavailable, this should be raised instead of
TestFailed.
"""
class ResourceDenied(TestSkipped):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
    has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
def import_module(name, deprecated=False):
"""Import the module to be tested, raising TestSkipped if it is not
available."""
with warnings.catch_warnings():
if deprecated:
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
try:
module = __import__(name, level=0)
except ImportError:
raise TestSkipped("No module named " + name)
else:
return module
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
def unlink(filename):
try:
os.unlink(filename)
except OSError:
pass
def rmtree(path):
try:
shutil.rmtree(path)
except OSError, e:
# Unix returns ENOENT, Windows returns ESRCH.
if e.errno not in (errno.ENOENT, errno.ESRCH):
raise
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
for dirname in sys.path:
unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is executing."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
HOST = 'localhost'
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
socket.error will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it."""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
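# Illustrative sketch (not part of the original file): the usual pattern for a test that
# needs a listening TCP socket on an ephemeral port.
#
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   port = bind_port(sock)      # bound to (HOST, <ephemeral port>)
#   sock.listen(1)
#   # ... connect a client to (HOST, port), run the test ...
#   sock.close()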
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if isinstance(x, float) or isinstance(y, float):
try:
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and isinstance(x, (tuple, list)):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return (len(x) > len(y)) - (len(x) < len(y))
return (x > y) - (x < y)
try:
unicode
have_unicode = True
except NameError:
have_unicode = False
is_jython = sys.platform.startswith('java')
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
if isinstance('', unicode):
# python -U
# XXX perhaps unicode() should accept Unicode strings?
TESTFN_UNICODE = "@test-\xe0\xf2"
else:
# 2 latin characters.
TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNICODE_UNENCODEABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNICODE_UNENCODEABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
# errors=ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print \
'WARNING: The filename %r CAN be encoded by the filesystem. ' \
'Unicode filename tests may not be effective' \
% TESTFN_UNICODE_UNENCODEABLE
# Make sure we can write to TESTFN, try in /tmp if we can't
fp = None
try:
fp = open(TESTFN, 'w+')
except IOError:
TMP_TESTFN = os.path.join('/tmp', TESTFN)
try:
fp = open(TMP_TESTFN, 'w+')
TESTFN = TMP_TESTFN
del TMP_TESTFN
except IOError:
print ('WARNING: tests will fail, unable to write to: %s or %s' %
(TESTFN, TMP_TESTFN))
if fp is not None:
fp.close()
unlink(TESTFN)
del fp
def findfile(file, here=__file__):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
if os.path.isabs(file):
return file
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def verify(condition, reason='test failed'):
"""Verify that condition is true. If not, raise TestFailed.
The optional argument reason can be given to provide
a better error text.
"""
if not condition:
raise TestFailed(reason)
def vereq(a, b):
"""Raise TestFailed if a == b is false.
This is better than verify(a == b) because, in case of failure, the
error message incorporates repr(a) and repr(b) so you can see the
inputs.
Note that "not (a == b)" isn't necessarily the same as "a != b"; the
former is tested.
"""
if not (a == b):
raise TestFailed("%r == %r" % (a, b))
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = dict.items()
items.sort()
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
try:
compile(statement, '<test string>', 'exec')
except SyntaxError:
pass
else:
testcase.fail('Missing SyntaxError: "%s"' % statement)
def open_urlresource(url):
import urllib, urlparse
requires('urlfetch')
filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
for path in [os.path.curdir, os.path.pardir]:
fn = os.path.join(path, filename)
if os.path.exists(fn):
return open(fn)
print >> get_original_stdout(), '\tfetching %s ...' % url
fn, _ = urllib.urlretrieve(url, filename)
return open(fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self.warnings = warnings_list
def __getattr__(self, attr):
if self.warnings:
return getattr(self.warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
def reset(self):
del self.warnings[:]
@contextlib.contextmanager
def check_warnings():
with warnings.catch_warnings(record=True) as w:
yield WarningsRecorder(w)
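# Illustrative sketch (not part of the original file): asserting on the most recent
# warning through the recorder returned by check_warnings().
#
#   with check_warnings() as w:
#       warnings.simplefilter("always")
#       warnings.warn("foo is deprecated", DeprecationWarning)
#       assert w.category is DeprecationWarning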
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
__import__("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
# In that case, we also need delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(object):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._unset = set()
self._reset = dict()
def set(self, envvar, value):
if envvar not in self._environ:
self._unset.add(envvar)
else:
self._reset[envvar] = self._environ[envvar]
self._environ[envvar] = value
def unset(self, envvar):
if envvar in self._environ:
self._reset[envvar] = self._environ[envvar]
del self._environ[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for envvar, value in self._reset.iteritems():
self._environ[envvar] = value
for unset in self._unset:
del self._environ[unset]
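# Illustrative sketch (not part of the original file): using EnvironmentVarGuard as a
# context manager so any variables set or unset are restored when the block exits.
#
#   with EnvironmentVarGuard() as env:
#       env.set('HOME', '/tmp/fakehome')
#       env.unset('LANG')
#       ...  # code under test sees the modified environment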
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.iteritems():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
def transient_internet():
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
time_out = TransientResource(IOError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(socket.error, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(IOError, errno=errno.ECONNRESET)
return contextlib.nested(time_out, socket_peer_reset, ioerror_peer_reset)
@contextlib.contextmanager
def captured_output(stream_name):
"""Run the 'with' statement body using a StringIO object in place of a
specific attribute on the sys module.
Example use (with 'stream_name=stdout')::
with captured_stdout() as s:
print "hello"
assert s.getvalue() == "hello"
"""
import StringIO
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StringIO.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
return captured_output("stdout")
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.func_name = func.func_name
inner.__doc__ = func.__doc__
return inner
return decorator
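# Illustrative sketch (not part of the original file): running one test under the first
# locale that can be set, restoring the original locale afterwards. The locale names are
# only examples.
#
#   @run_with_locale('LC_ALL', 'fr_FR.UTF-8', 'fr_FR', 'C')
#   def test_locale_dependent_formatting(self):
#       ...  # body sees the selected locale via the locale module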
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
import re
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
"""Decorator for bigmem tests.
'minsize' is the minimum useful size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it. 'overhead' specifies fixed overhead,
independent of the testsize, and defaults to 5Mb.
The decorator tries to guess a good value for 'size' and passes it to
the decorated test function. If minsize * memuse is more than the
allowed memory use (as defined by max_memuse), the test is skipped.
Otherwise, minsize is adjusted upward to use up to max_memuse.
"""
def decorator(f):
def wrapper(self):
if not max_memuse:
# If max_memuse is 0 (the default),
# we still want to run the tests with size set to a few kb,
# to make sure they work. We still want to avoid using
# too much memory, though, but we do that noisily.
maxsize = 5147
self.failIf(maxsize * memuse + overhead > 20 * _1M)
else:
maxsize = int((max_memuse - overhead) / memuse)
if maxsize < minsize:
# Really ought to print 'test skipped' or something
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
# Try to keep some breathing room in memory use
maxsize = max(maxsize - 50 * _1M, minsize)
return f(self, maxsize)
wrapper.minsize = minsize
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
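# Illustrative sketch (not part of the original file): a bigmem test declaring that it needs
# roughly three bytes per unit of size; it is skipped (or run at a small size) depending on
# the -M limit recorded in max_memuse. The test body is only an example.
#
#   @bigmemtest(minsize=_2G, memuse=3)
#   def test_concat(self, size):
#       s = '.' * size
#       self.assertEqual(len(s + s), 2 * size)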
def precisionbigmemtest(size, memuse, overhead=5*_1M):
def decorator(f):
def wrapper(self):
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if real_max_memuse and real_max_memuse < maxsize * memuse:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
return f(self, maxsize)
wrapper.size = size
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "errors occurred; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
_run_suite(suite)
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
def threading_setup():
import threading
return len(threading._active), len(threading._limbo)
def threading_cleanup(num_active, num_limbo):
import threading
import time
_MAX_COUNT = 10
count = 0
while len(threading._active) != num_active and count < _MAX_COUNT:
count += 1
time.sleep(0.1)
count = 0
while len(threading._limbo) != num_limbo and count < _MAX_COUNT:
count += 1
time.sleep(0.1)
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
|
ravello/ansible
|
refs/heads/devel
|
docsite/_themes/srtd/__init__.py
|
1504
|
"""Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
import os
VERSION = (0, 1, 5)
__version__ = ".".join(str(v) for v in VERSION)
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return cur_dir
|
blinkboxbooks/android-app
|
refs/heads/master
|
app/scripts/itemizer.py
|
1
|
#!/usr/bin/env python
import sys
outdata = ''
def printitem(inputstring):
global outdata
inputstring = inputstring.strip()
name = inputstring[:inputstring.find("=")]
item = inputstring[inputstring.find('"')+1:-1]
outdata += '<item name="' + name + '">' + item + '</item>\n'
for line in iter(sys.stdin.readline, ""):
if (len(line) < 2):
print outdata
outdata = ''
else:
printitem(line)
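# Illustrative sketch (not part of the original file): feeding key="value" lines on stdin;
# a blank line flushes the accumulated <item> block. Input values are only examples.
#
#   $ printf 'app_name="Example"\n\n' | python itemizer.py
#   <item name="app_name">Example</item>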
|
NervanaSystems/neon
|
refs/heads/master
|
neon/util/shelver.py
|
1
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from contextlib import contextmanager
import shelve
from filelock import FileLock
_locks = {}
def _lock(filename):
try:
return _locks[filename]
except KeyError:
_locks[filename] = FileLock(filename + '.lock')
return _locks[filename]
@contextmanager
def atomic_shelve(filename):
with _lock(filename):
shelved = shelve.open(filename)
yield shelved
shelved.close()
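# Illustrative sketch (not part of the original file): serializing access to a shelve file
# across processes with the helper above. The cache path and key are only examples.
if __name__ == '__main__':
    with atomic_shelve('/tmp/neon_demo_cache') as cache:
        cache['last_run'] = 42
        print(cache['last_run'])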
|
tuxerman/cdn-old
|
refs/heads/master
|
cdn/openstack/common/timeutils.py
|
20
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
if not at:
at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st
def parse_isotime(timestr):
"""Parse time from ISO 8601 format."""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(unicode(e))
except TypeError as e:
raise ValueError(unicode(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC naive object."""
offset = timestamp.utcoffset()
if offset is None:
return timestamp
return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
if utcnow.override_time is None:
# NOTE(kgriffs): This is several times faster
# than going through calendar.timegm(...)
return int(time.time())
return calendar.timegm(utcnow().timetuple())
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
try:
return utcnow.override_time.pop(0)
except AttributeError:
return utcnow.override_time
return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formated date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None
def set_time_override(override_time=None):
"""Overrides utils.utcnow.
Make it return a constant time or a list thereof, one at a time.
:param override_time: datetime instance or list thereof. If not
given, defaults to the current UTC time.
"""
utcnow.override_time = override_time or datetime.datetime.utcnow()
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
try:
for dt in utcnow.override_time:
dt += timedelta
except TypeError:
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overridden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
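# Illustrative sketch (not part of the original file): freezing and advancing time in tests
# with the override helpers above. The dates are only examples.
#
#   set_time_override(datetime.datetime(2014, 1, 1))
#   utcnow()                   # -> datetime.datetime(2014, 1, 1, 0, 0)
#   advance_time_seconds(90)
#   utcnow()                   # -> datetime.datetime(2014, 1, 1, 0, 1, 30)
#   clear_time_override()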
def marshall_now(now=None):
"""Make an rpc-safe datetime with microseconds.
Note: tzinfo is stripped, but not required for relative times.
"""
if not now:
now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
minute=now.minute, second=now.second,
microsecond=now.microsecond)
def unmarshall_time(tyme):
"""Unmarshall a datetime dict."""
return datetime.datetime(day=tyme['day'],
month=tyme['month'],
year=tyme['year'],
hour=tyme['hour'],
minute=tyme['minute'],
second=tyme['second'],
microsecond=tyme['microsecond'])
def delta_seconds(before, after):
"""Return the difference between two timing objects.
Compute the difference in seconds between two date, time, or
datetime objects (as a float, to microsecond resolution).
"""
delta = after - before
try:
return delta.total_seconds()
except AttributeError:
return ((delta.days * 24 * 3600) + delta.seconds +
float(delta.microseconds) / (10 ** 6))
def is_soon(dt, window):
"""Determines if time is going to happen in the next window seconds.
:params dt: the time
:params window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration
"""
soon = (utcnow() + datetime.timedelta(seconds=window))
return normalize_time(dt) <= soon
|
jeremyfix/pylearn2
|
refs/heads/master
|
pylearn2/testing/datasets.py
|
7
|
""" Simple datasets to be used for unit tests. """
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
from theano.compat.six.moves import xrange
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
class ArangeDataset(DenseDesignMatrix):
"""
A dataset where example i is just the number i. Makes it easy to track
which sets of examples are visited.
Parameters
----------
num_examples : WRITEME
"""
def __init__(self, num_examples):
X = np.zeros((num_examples, 1))
X[:, 0] = np.arange(num_examples)
super(ArangeDataset, self).__init__(X)


def random_dense_design_matrix(rng, num_examples, dim, num_classes):
    """
    Creates a random dense design matrix that has class labels.

    Parameters
    ----------
    rng : numpy.random.RandomState
        The random number generator used to generate the dataset.
    num_examples : int
        The number of examples to create.
    dim : int
        The number of features in each example.
    num_classes : int
        The number of classes to assign the examples to.
        0 indicates that no class labels will be generated.
    """
    X = rng.randn(num_examples, dim)

    if num_classes:
        Y = rng.randint(0, num_classes, (num_examples, 1))
        y_labels = num_classes
    else:
        Y = None
        y_labels = None

    return DenseDesignMatrix(X=X, y=Y, y_labels=y_labels)
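

# --- Hedged usage sketch (not part of the original module) -----------------
# Builds a small labelled dataset; y holds integer class indices of shape
# (num_examples, 1). The helper name is illustrative only.
def _example_random_dense_design_matrix():
    rng = np.random.RandomState(0)
    dataset = random_dense_design_matrix(rng, num_examples=20, dim=3,
                                         num_classes=4)
    assert dataset.X.shape == (20, 3)
    assert dataset.y.shape == (20, 1)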


def random_one_hot_dense_design_matrix(rng, num_examples, dim, num_classes):
    """Create a random dense design matrix with one-hot class labels."""
    X = rng.randn(num_examples, dim)

    idx = rng.randint(0, num_classes, (num_examples, ))
    Y = np.zeros((num_examples, num_classes))
    for i in xrange(num_examples):
        Y[i, idx[i]] = 1

    return DenseDesignMatrix(X=X, y=Y)
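

# --- Hedged usage sketch (not part of the original module) -----------------
# Each row of y is a one-hot vector, so every row sums to exactly 1. The
# helper name is illustrative only.
def _example_random_one_hot():
    rng = np.random.RandomState(0)
    dataset = random_one_hot_dense_design_matrix(rng, num_examples=10, dim=2,
                                                 num_classes=3)
    assert dataset.y.shape == (10, 3)
    assert (dataset.y.sum(axis=1) == 1).all()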


def random_one_hot_topological_dense_design_matrix(rng,
                                                   num_examples,
                                                   shape,
                                                   channels,
                                                   axes,
                                                   num_classes):
    """Create a random topological design matrix with one-hot class labels."""
    dims = {'b': num_examples,
            'c': channels}
    for i, dim in enumerate(shape):
        dims[i] = dim

    shape = [dims[axis] for axis in axes]
    X = rng.randn(*shape)

    idx = rng.randint(0, num_classes, (num_examples,))
    Y = np.zeros((num_examples, num_classes))
    for i in xrange(num_examples):
        Y[i, idx[i]] = 1

    return DenseDesignMatrix(topo_view=X, axes=axes, y=Y)
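

# --- Hedged usage sketch (not part of the original module) -----------------
# With axes=('b', 0, 1, 'c') the topological view has shape
# (num_examples, rows, cols, channels); DenseDesignMatrix flattens the
# non-batch axes into the design matrix. The helper name is illustrative.
def _example_random_one_hot_topological():
    rng = np.random.RandomState(0)
    dataset = random_one_hot_topological_dense_design_matrix(
        rng, num_examples=4, shape=(8, 8), channels=3,
        axes=('b', 0, 1, 'c'), num_classes=5)
    assert dataset.X.shape == (4, 8 * 8 * 3)
    assert dataset.y.shape == (4, 5)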
|
sahutd/youtube-dl
|
refs/heads/master
|
test/__init__.py
|
12133432
| |
dantebarba/docker-media-server
|
refs/heads/master
|
plex/Sub-Zero.bundle/Contents/Libraries/Shared/fcache/__init__.py
|
12133432
| |
TridevGuha/django
|
refs/heads/master
|
tests/view_tests/app3/__init__.py
|
12133432
| |
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.7.2/Lib/lib-tk/test/test_ttk/__init__.py
|
12133432
| |
valintinr/opennode-management
|
refs/heads/master
|
opennode/oms/backend/__init__.py
|
12133432
| |
mgit-at/ansible
|
refs/heads/devel
|
test/legacy/setup_gce.py
|
185
|
'''
Create GCE resources for use in integration tests.
Takes a prefix as a command-line argument and creates two persistent disks named
${prefix}-base and ${prefix}-extra and a snapshot of the base disk named
${prefix}-snapshot. prefix will be forced to lowercase, to ensure the names are
legal GCE resource names.
'''
import gce_credentials
import optparse
import sys


def parse_args():
    parser = optparse.OptionParser(
        usage="%s [options] <prefix>" % (sys.argv[0],), description=__doc__
    )
    gce_credentials.add_credentials_options(parser)
    parser.add_option(
        "--prefix",
        action="store",
        dest="prefix",
        help="String used to prefix GCE resource names (default: %default)"
    )
    (opts, args) = parser.parse_args()
    gce_credentials.check_required(opts, parser)
    if not args:
        parser.error("Missing required argument: name prefix")
    return (opts, args)


if __name__ == '__main__':
    (opts, args) = parse_args()
    gce = gce_credentials.get_gce_driver(opts)
    prefix = args[0].lower()
    try:
        base_volume = gce.create_volume(
            size=10, name=prefix + '-base', location='us-central1-a')
        gce.create_volume_snapshot(base_volume, name=prefix + '-snapshot')
        gce.create_volume(size=10, name=prefix + '-extra',
                          location='us-central1-a')
    except KeyboardInterrupt:
        print("\nExiting on user command.")
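

# --- Hedged illustration (not part of the original script) ------------------
# The helper below is hypothetical; it only restates the naming scheme from
# the module docstring (the prefix is lowercased, then '-base', '-extra' and
# '-snapshot' are appended).
def expected_resource_names(prefix):
    prefix = prefix.lower()
    return {'base': prefix + '-base',
            'extra': prefix + '-extra',
            'snapshot': prefix + '-snapshot'}
# For example, expected_resource_names('CI')['snapshot'] == 'ci-snapshot'.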
|
bmwiedemann/osc
|
refs/heads/master
|
osc/meter.py
|
5
|
# Copyright (C) 2018 SUSE Linux. All rights reserved.
# This program is free software; it may be used, copied, modified
# and distributed under the terms of the GNU General Public Licence,
# either version 2, or (at your option) any later version.
try:
    import progressbar as pb
    have_pb_module = True
except ImportError:
    have_pb_module = False


class PBTextMeter(object):

    def start(self, basename, size=None):
        if size is None:
            widgets = [basename + ': ', pb.AnimatedMarker(), ' ', pb.Timer()]
            self.bar = pb.ProgressBar(widgets=widgets,
                                      maxval=pb.UnknownLength)
        else:
            widgets = [basename + ': ', pb.Bar(), ' ', pb.ETA()]
            if size:
                # if size is 0, using pb.Percentage will result in
                # a ZeroDivisionException
                widgets.insert(1, pb.Percentage())
            self.bar = pb.ProgressBar(widgets=widgets, maxval=size)
        self.bar.start()

    def update(self, amount_read):
        self.bar.update(amount_read)

    def end(self):
        self.bar.finish()


class NoPBTextMeter(object):
    _complained = False

    def start(self, basename, size=None):
        if not self._complained:
            print('Please install the progressbar module')
            NoPBTextMeter._complained = True
        print('Processing: %s' % basename)

    def update(self, *args, **kwargs):
        pass

    def end(self, *args, **kwargs):
        pass


def create_text_meter(*args, **kwargs):
    use_pb_fallback = kwargs.pop('use_pb_fallback', True)
    if have_pb_module or use_pb_fallback:
        return TextMeter(*args, **kwargs)
    return None


if have_pb_module:
    TextMeter = PBTextMeter
else:
    TextMeter = NoPBTextMeter
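

# --- Hedged usage sketch (not part of the original module) -----------------
# How the factory above is meant to be used while copying a stream: start()
# with the total size, update() with the running byte count, end() when done.
# The helper name and file paths are illustrative only.
def _example_copy_with_meter(src_path, dest_path, chunk_size=8192):
    import os
    meter = create_text_meter(use_pb_fallback=True)
    total = os.path.getsize(src_path)
    copied = 0
    meter.start(os.path.basename(src_path), total)
    with open(src_path, 'rb') as src, open(dest_path, 'wb') as dest:
        while True:
            chunk = src.read(chunk_size)
            if not chunk:
                break
            dest.write(chunk)
            copied += len(chunk)
            meter.update(copied)
    meter.end()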
# vim: sw=4 et
|
axbaretto/beam
|
refs/heads/master
|
sdks/python/.tox/lint/lib/python2.7/site-packages/hamcrest/core/base_matcher.py
|
6
|
from __future__ import absolute_import
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
from hamcrest.core.matcher import Matcher
from hamcrest.core.string_description import tostring


class BaseMatcher(Matcher):
    """Base class for all :py:class:`~hamcrest.core.matcher.Matcher`
    implementations.

    Most implementations can just implement :py:obj:`_matches`, leaving the
    handling of any mismatch description to the ``matches`` method. But if it
    makes more sense to generate the mismatch description during the matching,
    override :py:meth:`~hamcrest.core.matcher.Matcher.matches` instead.
    """

    def __str__(self):
        return tostring(self)

    def _matches(self, item):
        raise NotImplementedError('_matches')

    def matches(self, item, mismatch_description=None):
        match_result = self._matches(item)
        if not match_result and mismatch_description:
            self.describe_mismatch(item, mismatch_description)
        return match_result

    def describe_mismatch(self, item, mismatch_description):
        mismatch_description.append_text('was ').append_description_of(item)
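

# --- Hedged usage sketch (not part of the original module) -----------------
# A minimal custom matcher built on BaseMatcher: only _matches() and
# describe_to() are implemented, so mismatch descriptions fall back to the
# default "was <item>" provided above. The class name is illustrative only.
class IsEven(BaseMatcher):

    def _matches(self, item):
        return isinstance(item, int) and item % 2 == 0

    def describe_to(self, description):
        description.append_text('an even integer')
# With hamcrest's assert_that, assert_that(4, IsEven()) passes, while
# assert_that(3, IsEven()) fails with a message like
# "Expected: an even integer ... but: was <3>".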
|