repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
usersource/anno | refs/heads/master | anno_gec_server/lib/requests/compat.py | 1038 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
# Detected interpreter version, cached once at import time.
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

# Prefer simplejson when available (historically faster and more widely
# deployed); fall back to the stdlib json module otherwise.
try:
    import simplejson as json
except (ImportError, SyntaxError):
    # simplejson does not support Python 3.2, it throws a SyntaxError
    # because of u'...' Unicode literals.
    import json

# ---------
# Specifics
# ---------

if is_py2:
    # Map the split Python 2 stdlib modules (urllib/urlparse/urllib2,
    # cookielib, Cookie, StringIO) onto the single set of names the rest
    # of the package imports from this module.
    from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
    from urllib2 import parse_http_list
    import cookielib
    from Cookie import Morsel
    from StringIO import StringIO
    from .packages.urllib3.packages.ordered_dict import OrderedDict

    # Text/bytes aliases: on Python 2 `str` is bytes and `unicode` is text.
    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)

elif is_py3:
    # Python 3 reorganized everything under urllib.*, http.* and io.
    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
    from urllib.request import parse_http_list, getproxies, proxy_bypass
    from http import cookiejar as cookielib
    from http.cookies import Morsel
    from io import StringIO
    from collections import OrderedDict

    # On Python 3 text is `str` and there is no separate `basestring`.
    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
|
kurazu/bridge | refs/heads/master | setup.py | 1 | import os
from setuptools import find_packages, setup, Extension
# Installation prefix of SpiderMonkey; override via environment when the
# engine lives somewhere other than /usr/local.
SPIDER_MONKEY_PREFIX = os.environ.get('SPIDER_MONKEY_PREFIX', '/usr/local')
# Directory/library name component for the installed SpiderMonkey version.
SPIDER_MONKEY_VERSION = os.environ.get('SPIDER_MONKEY_VERSION', 'mozjs-')

# Native extension module embedding the SpiderMonkey JavaScript engine.
runjs = Extension(
    'runjs',
    sources=['src/runjs_sm.cpp', 'src/runjs_type.cpp', 'src/runjs_module.cpp'],
    include_dirs=[
        os.path.join(SPIDER_MONKEY_PREFIX, 'include', SPIDER_MONKEY_VERSION)
    ],
    # Links against the mozjs library plus libm/libdl which it requires.
    libraries=[SPIDER_MONKEY_VERSION, 'm', 'dl'],
    library_dirs=[
        os.path.join(SPIDER_MONKEY_PREFIX, 'lib')
    ],
    # SpiderMonkey headers need C++11 language features.
    extra_compile_args=['-std=gnu++0x'],
)

setup(
    name='bridge',
    version='0.1',
    author='Tomasz Maćkowiak',
    author_email='kurazu@kurazu.net',
    description='This is a demo of Python - JS integration',
    url='https://github.com/kurazu/bridge',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    ext_modules=[runjs],
    # Python packages live under src/; examples are excluded from the dist.
    packages=find_packages('src', exclude=['examples']),
    package_dir={'': 'src'},
    include_package_data=False,
)
|
crepererum/invenio | refs/heads/master | invenio/modules/uploader/api.py | 13 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Uploader API.
Following example shows how to use this API for an easy use case::
>>> from invenio.modules.uploader.api import run
>>> blob = open('./testsuite/data/demo_record_marc_data.xml').read()
>>> reader_info = dict(schema='xml')
>>> run('insert', blob, master_format='marc', reader_info=reader_info)
"""
from __future__ import print_function
from celery import chord
from invenio.base.globals import cfg
from invenio.modules.jsonalchemy.reader import split_blob
from . import signals
from .tasks import translate, run_workflow
def run(name, input_file, master_format='marc', reader_info=None, **kwargs):
    """Entry point to run any of the modes of the uploader.

    :param name: Upload mode, see `~.config.UPLOADER_WORKFLOWS` for more info.
    :type name: str
    :param input_file: Input master format, typically the content of an XML
        file.
    :type input_file: str
    :param master_format: Input file format, for example `marc`
    :type master_format: str
    :param reader_info: Any kind of information relevant to the reader, like
        for example char encoding or special characters.  Defaults to an
        empty dict.
    :type reader_info: dict
    :param kwargs:
        * force:
        * pretend:
        * sync: False by default, if set to True the whole process will be
          treated synchronously
        * filename: original blob filename if it contains relative paths
    """
    # Use None as the default and create the dict per call: a mutable
    # default argument ({}) would be shared between invocations.
    if reader_info is None:
        reader_info = {}
    signals.uploader_started.send(mode=name,
                                  blob=input_file,
                                  master_format=master_format,
                                  **kwargs)
    # Fan each chunk of records out to translation tasks; the chord fires
    # the workflow task once every translation in the chunk has finished.
    for chunk in split_blob(input_file, master_format,
                            cfg['UPLOADER_NUMBER_RECORD_PER_WORKER'],
                            **reader_info):
        chord(translate.starmap(
            [(blob, master_format, reader_info) for blob in chunk])
        )(run_workflow.s(name=name, **kwargs))
|
Jimdo/beeswithmachineguns | refs/heads/master | setup.py | 4 | #!/usr/bin/env python
# NOTE: install_requires is a setuptools feature; plain distutils silently
# ignores it, so the dependency list below would never be installed.  Use
# setuptools when available and fall back to distutils otherwise.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(name='beeswithmachineguns',
      version='0.1.4',
      description='A utility for arming (creating) many bees (micro EC2 instances) to attack (load test) targets (web applications).',
      author='Christopher Groskopf',
      author_email='cgroskopf@tribune.com',
      url='http://github.com/newsapps/beeswithmachineguns',
      license='MIT',
      packages=['beeswithmachineguns'],
      # Command line entry point installed into the user's PATH.
      scripts=['bees'],
      install_requires=[
          'boto==2.27.0',
          'paramiko==1.14.0'
      ],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Software Development :: Testing :: Traffic Generation',
          'Topic :: Utilities',
      ],
      )
|
simbs/edx-platform | refs/heads/master | common/djangoapps/geoinfo/tests/__init__.py | 12133432 | |
ammaradil/fibonacci | refs/heads/master | Lib/site-packages/django/contrib/staticfiles/management/commands/__init__.py | 12133432 | |
xflr6/fileconfig | refs/heads/master | fileconfig/tools.py | 1 | # tools.py - runtime path inspection
import inspect
import os
import sys
__all__ = ['class_path', 'caller_path']
def class_path(cls):
    """Return the real path of the directory holding *cls*'s source file.

    Classes defined in the executed script itself (``__main__``) have no
    reliably resolvable file, so the current working directory is used.
    """
    directory = None
    if cls.__module__ != '__main__':
        directory = os.path.dirname(inspect.getfile(cls))
    # An empty dirname means the file sits in the current directory.
    return os.path.realpath(directory or os.getcwd())
def caller_path(steps=1):
    """Return the real path of the directory containing the source file of
    the caller *steps* frames up from our caller."""
    # steps + 1 skips this function's own frame.
    frame = sys._getframe(steps + 1)
    try:
        directory = os.path.dirname(frame.f_code.co_filename)
    finally:
        # Drop the frame reference promptly to avoid reference cycles.
        del frame
    return os.path.realpath(directory or os.getcwd())
|
jakobwilm/pcl | refs/heads/master | geometry/include/pcl/geometry/mesh_circulators.py | 69 | ##
# Software License Agreement (BSD License)
#
# Point Cloud Library (PCL) - www.pointclouds.org
# Copyright (c) 2009-2012, Willow Garage, Inc.
# Copyright (c) 2012-, Open Perception, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# # Neither the name of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
filename = os.path.join (os.path.dirname (__file__), 'mesh_circulators.h')
class Class:
    """Parameter bundle describing one circulator class to generate.

    Each attribute is a name fragment that the generator below splices into
    the emitted C++ code (half-edge getters, dereference target, increment
    and decrement operations).
    """

    _FIELDS = ('value_prefix', 'value_type', 'around_type', 'current_he',
               'deref', 'inc1', 'inc2', 'dec1', 'dec2')

    def __init__ (self, value_prefix, value_type, around_type, current_he, deref, inc1, inc2, dec1, dec2):
        values = (value_prefix, value_type, around_type, current_he,
                  deref, inc1, inc2, dec1, dec2)
        for field, value in zip(self._FIELDS, values):
            setattr(self, field, value)
        # Placeholder; the per-class docstrings are assigned further below.
        self.docstring = 'TODO: Add documentation!'
# Table of the eight circulator variants to generate; the trailing comment
# on each line is the index used by the docstring assignments below.
classes = []
classes.append (Class ('' , 'Vertex' , 'Vertex', 'Outgoing', 'TerminatingVertex', 'Opposite', 'Next' , 'Prev' , 'Opposite')) # 0
classes.append (Class ('Outgoing', 'HalfEdge', 'Vertex', 'Outgoing', '' , 'Opposite', 'Next' , 'Prev' , 'Opposite')) # 1
classes.append (Class ('Incoming', 'HalfEdge', 'Vertex', 'Incoming', '' , 'Next' , 'Opposite', 'Opposite', 'Prev' )) # 2
classes.append (Class ('' , 'Face' , 'Vertex', 'Outgoing', 'Face' , 'Opposite', 'Next' , 'Prev' , 'Opposite')) # 3
classes.append (Class ('' , 'Vertex' , 'Face' , 'Inner' , 'TerminatingVertex', 'Next' , '' , 'Prev' , '' )) # 4
classes.append (Class ('Inner' , 'HalfEdge', 'Face' , 'Inner' , '' , 'Next' , '' , 'Prev' , '' )) # 5
classes.append (Class ('Outer' , 'HalfEdge', 'Face' , 'Inner' , 'OppositeHalfEdge' , 'Next' , '' , 'Prev' , '' )) # 6
classes.append (Class ('' , 'Face' , 'Face' , 'Inner' , 'OppositeFace' , 'Next' , '' , 'Prev' , '' )) # 7
classes [0].docstring = 'Circulates counter-clockwise around a vertex and returns an index to the terminating vertex of the outgoing half-edge (the target).'
classes [1].docstring = 'Circulates counter-clockwise around a vertex and returns an index to the outgoing half-edge (the target).'
classes [2].docstring = 'Circulates counter-clockwise around a vertex and returns an index to the incoming half-edge (the target).'
classes [3].docstring = 'Circulates counter-clockwise around a vertex and returns an index to the face of the outgoing half-edge (the target).'
classes [4].docstring = 'Circulates clockwise around a face and returns an index to the terminating vertex of the inner half-edge (the target).'
classes [5].docstring = 'Circulates clockwise around a face and returns an index to the inner half-edge (the target).'
classes [6].docstring = 'Circulates clockwise around a face and returns an index to the outer half-edge (the target).'
classes [7].docstring = 'Circulates clockwise around a face and returns an index to the face of the outer half-edge (the target).'
################################################################################
# Emit the generated header: BSD license banner first, then include guard.
f = open (filename, 'w')
f.write ('/*\n')
f.write (' * Software License Agreement (BSD License)\n')
f.write (' *\n')
f.write (' * Point Cloud Library (PCL) - www.pointclouds.org\n')
f.write (' * Copyright (c) 2009-2012, Willow Garage, Inc.\n')
f.write (' * Copyright (c) 2012-, Open Perception, Inc.\n')
f.write (' *\n')
f.write (' * All rights reserved.\n')
f.write (' *\n')
f.write (' * Redistribution and use in source and binary forms, with or without\n')
f.write (' * modification, are permitted provided that the following conditions\n')
f.write (' * are met:\n')
f.write (' *\n')
f.write (' *  * Redistributions of source code must retain the above copyright\n')
f.write (' *    notice, this list of conditions and the following disclaimer.\n')
f.write (' *  * Redistributions in binary form must reproduce the above\n')
f.write (' *    copyright notice, this list of conditions and the following\n')
f.write (' *    disclaimer in the documentation and/or other materials provided\n')
f.write (' *    with the distribution.\n')
f.write (' *  * Neither the name of the copyright holder(s) nor the names of its\n')
f.write (' *    contributors may be used to endorse or promote products derived\n')
f.write (' *    from this software without specific prior written permission.\n')
f.write (' *\n')
f.write (' * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n')
f.write (' * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n')
f.write (' * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n')
f.write (' * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n')
f.write (' * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n')
f.write (' * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n')
f.write (' * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n')
f.write (' * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n')
f.write (' * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n')
f.write (' * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n')
f.write (' * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n')
f.write (' * POSSIBILITY OF SUCH DAMAGE.\n')
f.write (' *\n')
f.write (' * $Id$\n')
f.write (' *\n')
f.write (' */\n\n')
f.write ("// NOTE: This file has been created with 'pcl_src/geometry/include/pcl/geometry/mesh_circulators.py'\n\n")
f.write ('#ifndef PCL_GEOMETRY_MESH_CIRCULATORS_H\n')
f.write ('#define PCL_GEOMETRY_MESH_CIRCULATORS_H\n\n')
f.write ('#include <pcl/geometry/boost.h>\n')
f.write ('#include <pcl/geometry/mesh_indices.h>\n\n')
# One fully documented C++ circulator class per table entry.
for c in classes:
    # Derived name fragments and alignment padding used in the templates.
    value_prefix = c.value_prefix
    value_name = c.value_prefix + c.value_type
    value_type = c.value_type
    around_type = c.around_type
    around_obj = around_type.lower ()
    around_idx = 'idx_' + around_obj
    current_he = c.current_he
    current_he_idx = 'idx_' + current_he.lower () + '_half_edge'
    current_he_idx_ = current_he_idx + '_'
    deref = c.deref
    inc1 = c.inc1
    inc2 = c.inc2
    dec1 = c.dec1
    dec2 = c.dec2
    class_name = value_name + 'Around' + around_type + 'Circulator'
    placeholder_at = ' ' * (len (around_type) - 3)
    placeholder_cn = ' ' * len (class_name)
    placeholder_gt = ' ' * (len (current_he_idx) - 5)
    f.write ('////////////////////////////////////////////////////////////////////////////////\n')
    f.write ('// ' + class_name + '\n')
    f.write ('////////////////////////////////////////////////////////////////////////////////\n\n')
    f.write ('namespace pcl\n')
    f.write ('{\n')
    f.write ('  namespace geometry\n')
    f.write ('  {\n')
    f.write ('    /** \\brief ' + c.docstring + ' The best way to declare the circulator is to use the method pcl::geometry::MeshBase::get' + class_name + ' ().\n')
    f.write ("      * \\tparam MeshT Mesh to which this circulator belongs to.\n")
    f.write ("      * \\note The circulator can't be used to change the connectivity in the mesh (only const circulators are valid).\n")
    f.write ('      * \\author Martin Saelzle\n')
    f.write ('      * \ingroup geometry\n')
    f.write ('      */\n')
    f.write ('    template <class MeshT>\n')
    f.write ('    class ' + class_name + '\n')
    f.write ('        : boost::equality_comparable <pcl::geometry::' + class_name + ' <MeshT>\n')
    f.write ('        , boost::unit_steppable      <pcl::geometry::' + class_name + ' <MeshT>\n')
    f.write ('        > >\n')
    f.write ('    {\n')
    f.write ('      public:\n\n')
    f.write ('        typedef boost::equality_comparable <pcl::geometry::' + class_name + ' <MeshT>\n')
    f.write ('              , boost::unit_steppable      <pcl::geometry::' + class_name + ' <MeshT> > > Base;\n')
    f.write ('        typedef pcl::geometry::' + class_name + ' <MeshT> Self;\n\n')
    f.write ('        typedef MeshT Mesh;\n')
    # Avoid duplicate typedefs when the value/around types coincide.
    if value_type != 'HalfEdge':
        f.write ('        typedef typename Mesh::' + value_type + 'Index ' + value_type + 'Index;\n')
    if around_type != value_type:
        f.write ('        typedef typename Mesh::' + around_type + 'Index ' + around_type + 'Index;\n')
    f.write ('        typedef typename Mesh::HalfEdgeIndex HalfEdgeIndex;\n\n')
    f.write ('        /** \\brief Constructor resulting in an invalid circulator. */\n')
    f.write ('        ' + class_name + ' ()\n')
    f.write ('          : mesh_ ' + placeholder_gt + ' (NULL),\n')
    f.write ('            ' + current_he_idx_ + ' ()\n')
    f.write ('        {\n')
    f.write ('        }\n\n')
    f.write ('        /** \\brief Construct from the ' + around_obj + ' around which we want to circulate. */\n')
    f.write ('        ' + class_name + ' (const ' + around_type + 'Index& ' + around_idx + ',\n')
    f.write ('        ' + placeholder_cn + '  Mesh*const ' + placeholder_at + ' ' + 'mesh)\n')
    f.write ('          : mesh_ ' + placeholder_gt + ' (mesh),\n')
    f.write ('            ' + current_he_idx_ + ' (mesh->get' + current_he + 'HalfEdgeIndex (' + around_idx + '))\n')
    f.write ('        {\n')
    f.write ('        }\n\n')
    f.write ('        /** \\brief Construct directly from the ' + current_he.lower () + ' half-edge. */\n')
    f.write ('        ' + class_name + ' (const HalfEdgeIndex& ' + current_he_idx + ',\n')
    f.write ('        ' + placeholder_cn + '  Mesh*const ' + 'mesh)\n')
    f.write ('          : mesh_ ' + placeholder_gt + ' (mesh),\n')
    f.write ('            ' + current_he_idx_ + ' (' + current_he_idx + ')\n')
    f.write ('        {\n')
    f.write ('        }\n\n')
    f.write ('        /** \\brief Check if the circulator is valid.\n')
    f.write ('          * \\warning Does NOT check if the stored mesh pointer is valid. You have to ensure this yourself when constructing the circulator. */\n')
    f.write ('        inline bool\n')
    f.write ('        isValid () const\n')
    f.write ('        {\n')
    f.write ('          return (' + current_he_idx_ + '.isValid ());\n')
    f.write ('        }\n\n')
    f.write ('        /** \\brief Comparison operators (with boost::operators): == !=\n')
    f.write ('          * \\warning Does NOT check if the circulators belong to the same mesh. Please check this yourself. */\n')
    f.write ('        inline bool\n')
    f.write ('        operator == (const Self& other) const\n')
    f.write ('        {\n')
    f.write ('          return (' + current_he_idx_ + ' == other.' + current_he_idx_ + ');\n')
    f.write ('        }\n\n')
    # Increment: one or two chained half-edge getters per table entry.
    tmp = 'mesh_->get' + inc1 + 'HalfEdgeIndex (' + current_he_idx_ + ')'
    if inc2:
        tmp = 'mesh_->get' + inc2 + 'HalfEdgeIndex (' + tmp + ')'
    f.write ('        /** \\brief Increment operators (with boost::operators): ++ (pre and post) */\n')
    f.write ('        inline Self&\n')
    f.write ('        operator ++ ()\n')
    f.write ('        {\n')
    f.write ('          ' + current_he_idx_ + ' = ' + tmp + ';\n')
    f.write ('          return (*this);\n')
    f.write ('        }\n\n')
    # Decrement: mirror image of the increment chain.
    tmp = 'mesh_->get' + dec1 + 'HalfEdgeIndex (' + current_he_idx_ + ')'
    if dec2:
        tmp = 'mesh_->get' + dec2 + 'HalfEdgeIndex (' + tmp + ')'
    f.write ('        /** \\brief Decrement operators (with boost::operators): -- (pre and post) */\n')
    f.write ('        inline Self&\n')
    f.write ('        operator -- ()\n')
    f.write ('        {\n')
    f.write ('          ' + current_he_idx_ + ' = ' + tmp + ';\n')
    f.write ('          return (*this);\n')
    f.write ('        }\n\n')
    # Target accessor: either dereference through a getter or return the
    # stored half-edge index directly.
    if deref:
        tmp = 'mesh_->get' + deref + 'Index (' + current_he_idx_ + ')'
    else:
        tmp = current_he_idx_
    tgt = 'half-edge' if value_type=='HalfEdge' else value_type.lower ()
    tgt = value_prefix.lower () + (' ' if value_prefix else 'target ') + tgt
    f.write ('        /** \\brief Get the index to the ' + tgt + '. */\n')
    f.write ('        inline ' + value_type + 'Index\n')
    f.write ('        getTargetIndex () const\n')
    f.write ('        {\n')
    f.write ('          return (' + tmp + ');\n')
    f.write ('        }\n\n')
    f.write ('        /** \\brief Get the half-edge that is currently stored in the circulator. */\n')
    f.write ('        inline HalfEdgeIndex\n')
    f.write ('        getCurrentHalfEdgeIndex () const\n')
    f.write ('        {\n')
    f.write ('          return (' + current_he_idx_ + ');\n')
    f.write ('        }\n\n')
    f.write ('        /** \\brief The mesh to which this circulator belongs to. */\n')
    f.write ('        Mesh* mesh_;\n\n')
    f.write ('        /** \\brief The ' + current_he.lower () + ' half-edge of the ' + around_obj + ' around which we want to circulate. */\n')
    f.write ('        HalfEdgeIndex ' + current_he_idx_ + ';\n')
    f.write ('    };\n')
    f.write ('  } // End namespace geometry\n')
    f.write ('} // End namespace pcl\n\n')
f.write ('#endif // PCL_GEOMETRY_MESH_CIRCULATORS_H\n')
f.close()
|
GetSomeBlocks/Score_Soccer | refs/heads/master | resources/lib/mockito/mockito/__init__.py | 7 | from mockito import * |
ammaritiz/pulp_puppet | refs/heads/master | pulp_puppet_common/test/__init__.py | 12133432 | |
zzzombat/lucid-python-django | refs/heads/master | tests/regressiontests/admin_filterspecs/__init__.py | 12133432 | |
epfl-cosmo/lammps | refs/heads/master | tools/moltemplate/moltemplate/nbody_alt_symmetry/dihedrals_nosym.py | 13 | try:
from ..nbody_graph_search import Ugraph
except:
# not installed as a module
from nbody_graph_search import Ugraph
# To find 4-body "dihedral" interactions, we would use this subgraph:
#
# 1st bond connects atoms 0 and 1
# *---*---*---* => 2nd bond connects atoms 1 and 2
# 0 1 2 3 3rd bond connects atoms 2 and 3
#
bond_pattern = Ugraph([(0,1), (1,2), (2,3)])
# (Ugraph atom indices begin at 0, not 1)
def canonical_order(match):
    """Return *match* unchanged.

    When searching for atoms with matching bond patterns, GraphMatcher
    often returns redundant results, and a "canonical_order" function
    normally sorts the atoms and bonds consistently with the symmetry of
    the N-body interaction being considered.  However, some
    dihedral_styles (such as dihedral_style class2) have no symmetry
    (at least not for arbitrary choices of parameters): for those
    force-field styles the different permutations of atom order are NOT
    equivalent, because the formula for the interaction between atoms
    1,2,3,4 differs from the formula for atoms 4,3,2,1.  We therefore
    must not rearrange the atoms (and bonds) in the match, so it is
    returned exactly as received.
    """
    return match
|
Sinkmanu/auth-system-chronos | refs/heads/master | Chronos Auth System/tools/tools/urwid/text_layout.py | 8 | #!/usr/bin/python
#
# Urwid Text Layout classes
# Copyright (C) 2004-2007 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from util import *
class TextLayout:
    """Abstract base class for text layout engines."""

    def supports_align_mode(self, align):
        """Return True if align is a supported align mode."""
        return True

    def supports_wrap_mode(self, wrap):
        """Return True if wrap is a supported wrap mode."""
        return True

    def layout(self, text, width, align, wrap ):
        """
        Return a layout structure for text.

        text -- string in current encoding or unicode string
        width -- number of screen columns available
        align -- align mode for text
        wrap -- wrap mode for text

        Layout structure is a list of line layouts, one per output line.
        Line layouts are lists that may contain the following tuples:
        ( column width of text segment, start offset, end offset )
        ( number of space characters to insert, offset or None)
        ( column width of insert text, offset, "insert text" )

        The offset in the last two tuples is used to determine the
        attribute used for the inserted spaces or text respectively.
        The attribute used will be the same as the attribute at that
        text offset. If the offset is None when inserting spaces
        then no attribute will be used.
        """
        # Abstract method: concrete layouts (see StandardTextLayout) must
        # override this.
        assert 0, ("This function must be overridden by a real"
            " text layout class. (see StandardTextLayout)")
        return [[]]
class StandardTextLayout(TextLayout):
    """Default layout engine supporting left/center/right alignment and
    any/space/clip wrapping."""

    def __init__(self):#, tab_stops=(), tab_stop_every=8):
        pass
        # Planned (currently disabled) tab-stop support:
        #"""
        #tab_stops -- list of screen column indexes for tab stops
        #tab_stop_every -- repeated interval for following tab stops
        #"""
        #assert tab_stop_every is None or type(tab_stop_every)==type(0)
        #if not tab_stops and tab_stop_every:
        #    self.tab_stops = (tab_stop_every,)
        #self.tab_stops = tab_stops
        #self.tab_stop_every = tab_stop_every

    def supports_align_mode(self, align):
        """Return True if align is 'left', 'center' or 'right'."""
        return align in ('left', 'center', 'right')

    def supports_wrap_mode(self, wrap):
        """Return True if wrap is 'any', 'space' or 'clip'."""
        return wrap in ('any', 'space', 'clip')

    def layout(self, text, width, align, wrap ):
        """Return a layout structure for text."""
        # Wrap first, then apply horizontal alignment to each line.
        segs = self.calculate_text_segments( text, width, wrap )
        return self.align_layout( text, width, segs, wrap, align )

    def pack(self, maxcol, layout):
        """
        Return a minimal maxcol value that would result in the same
        number of lines for layout.  layout must be a layout structure
        returned by self.layout().
        """
        maxwidth = 0
        assert layout, "huh? empty layout?: "+`layout`
        for l in layout:
            lw = line_width(l)
            if lw >= maxcol:
                return maxcol
            maxwidth = max(maxwidth, lw)
        return maxwidth

    def align_layout( self, text, width, segs, wrap, align ):
        """Convert the layout segs to an aligned layout."""
        out = []
        for l in segs:
            sc = line_width(l)
            if sc == width or align=='left':
                out.append(l)
                continue
            if align == 'right':
                # prepend a full-width shift segment
                out.append([(width-sc, None)] + l)
                continue
            assert align == 'center'
            # NOTE(review): '/' is Python 2 integer division here.
            out.append([((width-sc+1)/2, None)] + l)
        return out

    def calculate_text_segments( self, text, width, wrap ):
        """
        Calculate the segments of text to display given width screen
        columns to display them.

        text - text to display
        width - number of available screen columns
        wrap - wrapping mode used

        Returns a layout structure without alignment applied.
        """
        b = []  # result: one list of segment tuples per output line
        p = 0   # current parse position within text
        if wrap == 'clip':
            # no wrapping to calculate, so it's easy.
            while p<=len(text):
                n_cr = text.find("\n", p)
                if n_cr == -1:
                    n_cr = len(text)
                sc = calc_width(text, p, n_cr)
                l = [(0,n_cr)]
                if p!=n_cr:
                    l = [(sc, p, n_cr)] + l
                b.append(l)
                p = n_cr+1
            return b
        while p<=len(text):
            # look for next eligible line break
            n_cr = text.find("\n", p)
            if n_cr == -1:
                n_cr = len(text)
            sc = calc_width(text, p, n_cr)
            if sc == 0:
                # removed character hint
                b.append([(0,n_cr)])
                p = n_cr+1
                continue
            if sc <= width:
                # this segment fits
                b.append([(sc,p,n_cr),
                    # removed character hint
                    (0,n_cr)])
                p = n_cr+1
                continue
            pos, sc = calc_text_pos( text, p, n_cr, width )
            # FIXME: handle pathological width=1 double-byte case
            if wrap == 'any':
                b.append([(sc,p,pos)])
                p = pos
                continue
            assert wrap == 'space'
            if text[pos] == " ":
                # perfect space wrap
                b.append([(sc,p,pos),
                    # removed character hint
                    (0,pos)])
                p = pos+1
                continue
            if is_wide_char(text, pos):
                # perfect next wide
                b.append([(sc,p,pos)])
                p = pos
                continue
            # scan backwards for a space or wide char to break at
            prev = pos
            while prev > p:
                prev = move_prev_char(text, p, prev)
                if text[prev] == " ":
                    sc = calc_width(text,p,prev)
                    l = [(0,prev)]
                    if p!=prev:
                        l = [(sc,p,prev)] + l
                    b.append(l)
                    p = prev+1
                    break
                if is_wide_char(text,prev):
                    # wrap after wide char
                    next = move_next_char(text, prev, pos)
                    sc = calc_width(text,p,next)
                    b.append([(sc,p,next)])
                    p = next
                    break
            else:
                # while-else: no break point found within this word.
                # unwrap previous line space if possible to
                # fit more text (we're breaking a word anyway)
                if b and (len(b[-1]) == 2 or ( len(b[-1])==1
                        and len(b[-1][0])==2 )):
                    # look for removed space above
                    if len(b[-1]) == 1:
                        [(h_sc, h_off)] = b[-1]
                        p_sc = 0
                        p_off = p_end = h_off
                    else:
                        [(p_sc, p_off, p_end),
                            (h_sc, h_off)] = b[-1]
                    if (p_sc < width and h_sc==0 and
                            text[h_off] == " "):
                        # combine with previous line
                        del b[-1]
                        p = p_off
                        pos, sc = calc_text_pos(
                            text, p, n_cr, width )
                        b.append([(sc,p,pos)])
                        # check for trailing " " or "\n"
                        p = pos
                        if p < len(text) and (
                                text[p] in (" ","\n")):
                            # removed character hint
                            b[-1].append((0,p))
                            p += 1
                        continue
                # force any char wrap
                b.append([(sc,p,pos)])
                p = pos
        return b
######################################
# default layout object to use
default_layout = StandardTextLayout()
######################################
class LayoutSegment:
    """Wrapper around one line-layout segment tuple, exposing its fields as
    attributes: sc (screen columns), offs (text offset or None), and either
    text (inserted text) or end (end offset), depending on the tuple form."""

    def __init__(self, seg):
        """Create object from line layout segment structure"""
        assert type(seg) == type(()), `seg`
        assert len(seg) in (2,3), `seg`
        self.sc, self.offs = seg[:2]
        assert type(self.sc) == type(0), `self.sc`
        if len(seg)==3:
            # 3-tuple: either (sc, offs, end) or (sc, offs, "insert text")
            assert type(self.offs) == type(0), `self.offs`
            assert self.sc > 0, `seg`
            t = seg[2]
            if type(t) == type(""):
                self.text = t
                self.end = None
            else:
                assert type(t) == type(0), `t`
                self.text = None
                self.end = t
        else:
            # 2-tuple: padding (sc, offs-or-None)
            assert len(seg) == 2, `seg`
            if self.offs is not None:
                assert self.sc >= 0, `seg`
                assert type(self.offs)==type(0)
            self.text = self.end = None

    def subseg(self, text, start, end):
        """
        Return a "sub-segment" list containing segment structures
        that make up a portion of this segment.

        A list is returned to handle cases where wide characters
        need to be replaced with a space character at either edge
        so two or three segments will be returned.
        """
        # Clamp the requested window to this segment's extent.
        if start < 0: start = 0
        if end > self.sc: end = self.sc
        if start >= end:
            return [] # completely gone
        if self.text:
            # use text stored in segment (self.text)
            spos, epos, pad_left, pad_right = calc_trim_text(
                self.text, 0, len(self.text), start, end )
            return [ (end-start, self.offs, " "*pad_left +
                self.text[spos:epos] + " "*pad_right) ]
        elif self.end:
            # use text passed as parameter (text)
            spos, epos, pad_left, pad_right = calc_trim_text(
                text, self.offs, self.end, start, end )
            l = []
            if pad_left:
                # a wide char was cut in half at the left edge
                l.append((1,spos-1))
            l.append((end-start-pad_left-pad_right, spos, epos))
            if pad_right:
                l.append((1,epos))
            return l
        else:
            # simple padding adjustment
            return [(end-start,self.offs)]
def line_width( segs ):
    """
    Return the screen column width of one line of a text layout structure.

    Any existing shift applied to the line, represented by an
    (amount, None) tuple at the start of the line, is ignored.
    """
    relevant = segs
    if segs and len(segs[0]) == 2 and segs[0][1] is None:
        relevant = segs[1:]
    return sum(seg[0] for seg in relevant)
def shift_line( segs, amount ):
    """
    Return a shifted line from a layout structure to the left or right.

    segs -- line of a layout structure
    amount -- screen columns to shift right (+ve) or left (-ve)
    """
    assert type(amount) == type(0), repr(amount)
    rest = segs
    if segs and len(segs[0]) == 2 and segs[0][1] is None:
        # fold an existing shift segment into the requested amount
        amount += segs[0][0]
        rest = segs[1:]
    if not amount:
        return rest
    return [(amount, None)] + rest
def trim_line( segs, text, start, end ):
    """
    Return a trimmed line of a text layout structure.

    segs -- line of a layout structure
    text -- text to which this layout structure applies
    start -- starting screen column
    end -- ending screen column
    """
    l = []
    x = 0  # screen column at the start of the current segment
    for seg in segs:
        sc = seg[0]
        if start or sc < 0:
            if start >= sc:
                # segment lies entirely left of the trim window
                start -= sc
                x += sc
                continue
            s = LayoutSegment(seg)
            if x+sc >= end:
                # can all be done at once
                return s.subseg( text, start, end-x )
            l += s.subseg( text, start, sc )
            start = 0
            x += sc
            continue
        if x >= end:
            break
        if x+sc > end:
            s = LayoutSegment(seg)
            l += s.subseg( text, 0, end-x )
            break
        l.append( seg )
        # BUG FIX: advance past segments copied whole; without this the
        # end-of-window comparisons above use a stale column position and
        # segments beyond `end` are wrongly kept.
        x += sc
    return l
def calc_line_pos( text, line_layout, pref_col ):
    """
    Calculate the closest linear position to pref_col given a
    line layout structure.  Returns None if no position found.

    pref_col may also be 'left' or 'right' to request the first or
    last position on the line.
    """
    closest_sc = None    # screen column of best candidate so far
    closest_pos = None   # text offset (int) or LayoutSegment candidate
    current_sc = 0       # screen column at start of current segment
    if pref_col == 'left':
        # first segment with a text offset wins
        for seg in line_layout:
            s = LayoutSegment(seg)
            if s.offs is not None:
                return s.offs
        return
    elif pref_col == 'right':
        # remember the last segment with a text offset
        for seg in line_layout:
            s = LayoutSegment(seg)
            if s.offs is not None:
                closest_pos = s
        s = closest_pos
        if s is None:
            return
        if s.end is None:
            return s.offs
        return calc_text_pos( text, s.offs, s.end, s.sc-1)[0]
    for seg in line_layout:
        s = LayoutSegment(seg)
        if s.offs is not None:
            if s.end is not None:
                if (current_sc <= pref_col and
                    pref_col < current_sc + s.sc):
                    # exact match within this segment
                    return calc_text_pos( text,
                        s.offs, s.end,
                        pref_col - current_sc )[0]
                elif current_sc <= pref_col:
                    closest_sc = current_sc + s.sc - 1
                    closest_pos = s
            if closest_sc is None or ( abs(pref_col-current_sc)
                < abs(pref_col-closest_sc) ):
                # this screen column is closer
                closest_sc = current_sc
                closest_pos = s.offs
            if current_sc > closest_sc:
                # we're moving past
                break
        current_sc += s.sc
    if closest_pos is None or type(closest_pos) == type(0):
        return closest_pos
    # return the last positions in the segment "closest_pos"
    s = closest_pos
    return calc_text_pos( text, s.offs, s.end, s.sc-1)[0]
def calc_pos( text, layout, pref_col, row ):
    """
    Calculate the closest linear position to pref_col and row given a
    layout structure.

    text -- text the layout applies to
    layout -- layout structure (a list of line layouts)
    pref_col -- preferred screen column
    row -- preferred row; if no position exists on it, nearby rows are
        searched, alternating one row above and one row below

    Raises an Exception if row is outside the layout.
    """
    if row < 0 or row >= len(layout):
        raise Exception("calculate_pos: out of layout row range")

    pos = calc_line_pos( text, layout[row], pref_col )
    if pos is not None:
        return pos

    # No addressable position on the preferred row: fan outward.
    # list() so .pop() works regardless of what range() returns.
    rows_above = list(range(row-1, -1, -1))
    rows_below = list(range(row+1, len(layout)))
    # Bug fix: this loop used "and", which stopped the search as soon as
    # either direction ran out of rows (e.g. from the last row nothing was
    # ever searched because rows_below started empty).  The per-direction
    # guards below only make sense with "or".
    while rows_above or rows_below:
        if rows_above:
            r = rows_above.pop(0)
            pos = calc_line_pos(text, layout[r], pref_col)
            if pos is not None: return pos
        if rows_below:
            r = rows_below.pop(0)
            pos = calc_line_pos(text, layout[r], pref_col)
            if pos is not None: return pos
    return 0
def calc_coords( text, layout, pos, clamp=1 ):
    """
    Calculate the coordinates closest to position pos in text with layout.

    text -- raw string or unicode string
    layout -- layout structure applied to text
    pos -- integer position into text
    clamp -- ignored right now

    Returns an (x, y) screen coordinate pair; (0, 0) when the layout
    maps no text positions at all.
    """
    closest = None  # (distance, (x, y)) of the best candidate so far
    y = 0
    for line_layout in layout:
        x = 0
        for seg in line_layout:
            s = LayoutSegment(seg)
            if s.offs is None:
                # Segment maps no text (e.g. padding); just advance.
                x += s.sc
                continue
            if s.offs == pos:
                return x,y
            if s.end is not None and s.offs<=pos and s.end>pos:
                # pos falls inside this segment; measure up to it.
                x += calc_width( text, s.offs, pos )
                return x,y
            # Track how far this segment is from pos as a fallback.
            distance = abs(s.offs - pos)
            if s.end is not None and s.end<pos:
                distance = pos - (s.end-1)
            if closest is None or distance < closest[0]:
                closest = distance, (x,y)
            x += s.sc
        y += 1
    if closest:
        return closest[1]
    return 0,0
|
fighterCui/L4ReFiascoOC | refs/heads/master | l4/pkg/python/contrib/Lib/plat-mac/appletrunner.py | 33 | #!/usr/bin/env python
# This file is meant as an executable script for running applets.
# BuildApplet will use it as the main executable in the .app bundle if
# we are not running in a framework build.
# Deprecation notice for the Python 3 transition.
from warnings import warnpy3k
warnpy3k("In 3.x, appletrunner is removed.", stacklevel=2)

import os
import sys

# Locate the applet's real main program in the bundle's Resources
# directory, two directory levels up from sys.argv[0] (which is
# presumably .../Contents/MacOS/<executable> inside a .app bundle --
# the __rawmain__ variants take precedence over __main__).
for name in ["__rawmain__.py", "__rawmain__.pyc", "__main__.py", "__main__.pyc"]:
    realmain = os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])),
                            "Resources", name)
    if os.path.exists(realmain):
        break
else:
    # for/else: no candidate file existed.
    sys.stderr.write("%s: cannot find applet main program\n" % sys.argv[0])
    sys.exit(1)

# Re-exec the interpreter on the located script, preserving all other
# arguments and the current environment.
sys.argv.insert(1, realmain)
os.execve(sys.executable, sys.argv, os.environ)
|
marqh/iris | refs/heads/master | lib/iris/tests/integration/plot/test_colorbar.py | 6 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test interaction between :mod:`iris.plot` and
:func:`matplotlib.pyplot.colorbar`
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
from iris.coords import AuxCoord
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
from iris.plot import contour, contourf, pcolormesh, pcolor,\
points, scatter
@tests.skip_plot
class TestColorBarCreation(tests.GraphicsTest):
    """
    Verify that matplotlib.pyplot.colorbar attaches to the mappable
    returned by each iris.plot draw function -- both implicitly (the
    "current image") and when a mappable is passed explicitly.
    """

    def setUp(self):
        super(TestColorBarCreation, self).setUp()
        # Draw functions sharing the same cube-based call signature.
        self.draw_functions = (contour, contourf, pcolormesh, pcolor)
        self.cube = iris.tests.stock.lat_lon_cube()
        self.cube.coord('longitude').guess_bounds()
        self.cube.coord('latitude').guess_bounds()
        # A sinusoidal lon/lat trajectory for the scatter tests.
        self.traj_lon = AuxCoord(np.linspace(-180, 180, 50),
                                 standard_name='longitude',
                                 units='degrees')
        self.traj_lat = AuxCoord(np.sin(np.deg2rad(self.traj_lon.points))*30.0,
                                 standard_name='latitude',
                                 units='degrees')

    def test_common_draw_functions(self):
        # colorbar() with no argument should pick up the latest mappable.
        for draw_function in self.draw_functions:
            mappable = draw_function(self.cube)
            cbar = plt.colorbar()
            self.assertIs(
                cbar.mappable, mappable,
                msg='Problem with draw function iris.plot.{}'.format(
                    draw_function.__name__))

    def test_common_draw_functions_specified_mappable(self):
        # An explicitly passed mappable must win over the current image.
        for draw_function in self.draw_functions:
            mappable_initial = draw_function(self.cube, cmap='cool')
            mappable = draw_function(self.cube)
            cbar = plt.colorbar(mappable_initial)
            self.assertIs(
                cbar.mappable, mappable_initial,
                msg='Problem with draw function iris.plot.{}'.format(
                    draw_function.__name__))

    def test_points_with_c_kwarg(self):
        mappable = points(self.cube, c=self.cube.data)
        cbar = plt.colorbar()
        self.assertIs(cbar.mappable, mappable)

    def test_points_with_c_kwarg_specified_mappable(self):
        mappable_initial = points(self.cube, c=self.cube.data, cmap='cool')
        mappable = points(self.cube, c=self.cube.data)
        cbar = plt.colorbar(mappable_initial)
        self.assertIs(cbar.mappable, mappable_initial)

    def test_scatter_with_c_kwarg(self):
        mappable = scatter(self.traj_lon, self.traj_lat,
                           c=self.traj_lon.points)
        cbar = plt.colorbar()
        self.assertIs(cbar.mappable, mappable)

    def test_scatter_with_c_kwarg_specified_mappable(self):
        mappable_initial = scatter(self.traj_lon, self.traj_lat,
                                   c=self.traj_lon.points)
        mappable = scatter(self.traj_lon, self.traj_lat,
                           c=self.traj_lon.points,
                           cmap='cool')
        cbar = plt.colorbar(mappable_initial)
        self.assertIs(cbar.mappable, mappable_initial)
if __name__ == "__main__":
tests.main()
|
odoousers2014/odoo | refs/heads/master | addons/web/__init__.py | 435 | import sys
# Mock deprecated openerp.addons.web.http module:
# alias the old import path to openerp.http so legacy code importing
# openerp.addons.web.http keeps working unchanged.
import openerp.http
sys.modules['openerp.addons.web.http'] = openerp.http
http = openerp.http

# Importing controllers registers this addon's HTTP routes (side effect).
import controllers
|
eefriedman/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/webdriver/webdriver/keys.py | 263 | """Constants for special keys."""
class Keys:
    """Constants for special keys.

    Each value is a single code point in the Unicode Private Use Area
    (U+E000 and up); per the WebDriver protocol these encode non-text
    key presses in key sequences.
    """
    # Editing / control keys.
    NULL = '\uE000'
    CANCEL = '\uE001'
    HELP = '\uE002'
    BACK_SPACE = '\uE003'
    TAB = '\uE004'
    CLEAR = '\uE005'
    RETURN = '\uE006'
    ENTER = '\uE007'
    # Modifier keys (the LEFT_* names are aliases of the same codes).
    SHIFT = '\uE008'
    LEFT_SHIFT = '\uE008'
    CONTROL = '\uE009'
    LEFT_CONTROL = '\uE009'
    ALT = '\uE00A'
    LEFT_ALT = '\uE00A'
    PAUSE = '\uE00B'
    ESCAPE = '\uE00C'
    SPACE = '\uE00D'
    # Navigation keys (the ARROW_* names are aliases).
    PAGE_UP = '\uE00E'
    PAGE_DOWN = '\uE00F'
    END = '\uE010'
    HOME = '\uE011'
    LEFT = '\uE012'
    ARROW_LEFT = '\uE012'
    UP = '\uE013'
    ARROW_UP = '\uE013'
    RIGHT = '\uE014'
    ARROW_RIGHT = '\uE014'
    DOWN = '\uE015'
    ARROW_DOWN = '\uE015'
    INSERT = '\uE016'
    DELETE = '\uE017'
    SEMICOLON = '\uE018'
    EQUALS = '\uE019'
    # Numeric keypad.
    NUMPAD0 = '\uE01A'
    NUMPAD1 = '\uE01B'
    NUMPAD2 = '\uE01C'
    NUMPAD3 = '\uE01D'
    NUMPAD4 = '\uE01E'
    NUMPAD5 = '\uE01F'
    NUMPAD6 = '\uE020'
    NUMPAD7 = '\uE021'
    NUMPAD8 = '\uE022'
    NUMPAD9 = '\uE023'
    MULTIPLY = '\uE024'
    ADD = '\uE025'
    SEPARATOR = '\uE026'
    SUBTRACT = '\uE027'
    DECIMAL = '\uE028'
    DIVIDE = '\uE029'
    # Function keys.
    F1 = '\uE031'
    F2 = '\uE032'
    F3 = '\uE033'
    F4 = '\uE034'
    F5 = '\uE035'
    F6 = '\uE036'
    F7 = '\uE037'
    F8 = '\uE038'
    F9 = '\uE039'
    F10 = '\uE03A'
    F11 = '\uE03B'
    F12 = '\uE03C'
    # OS / meta keys (COMMAND is an alias of META).
    META = '\uE03D'
    COMMAND = '\uE03D'
    ZENKAKU_HANKAKU = '\uE040'
|
rsnakamura/iperflexer | refs/heads/master | iperflexer/tests/environment.py | 3 | from distutils.util import strtobool as _bool
import os
BEHAVE_DEBUG_ON_ERROR = _bool(os.environ.get("BEHAVE_DEBUG_ON_ERROR",
"no"))
def after_step(context, step):
    """Behave hook: drop into the pudb post-mortem debugger on failure.

    Active only when the BEHAVE_DEBUG_ON_ERROR environment flag is set.
    """
    # Guard clauses keep the debugger path unindented and obvious.
    if not BEHAVE_DEBUG_ON_ERROR:
        return
    if step.status != 'failed':
        return
    import pudb
    pudb.post_mortem(tb=step.exc_traceback,
                     e_type=None,
                     e_value=None)
|
unnikrishnankgs/va | refs/heads/master | venv/lib/python3.5/site-packages/numpy/doc/creation.py | 52 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to NumPy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x
array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic NumPy Array Creation
==============================
NumPy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
::

  >>> np.zeros((2, 3))
  array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
"""
from __future__ import division, absolute_import, print_function
|
schwehr/gdal-autotest2 | refs/heads/master | python/gcore/geoloc_test.py | 1 | #!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Test geolocation warper.
Rewrite of:
http://trac.osgeo.org/gdal/browser/trunk/autotest/gcore/geoloc.py
"""
import contextlib
import os
from osgeo import gdal
import unittest
from autotest2.gcore import gcore_util
from autotest2.gdrivers import gdrivers_util
EXT = '.vrt'
@contextlib.contextmanager
def PushDir(path):
  """Context manager: chdir into path, restoring the original cwd on exit.

  The restore runs in a finally clause, so an exception raised inside the
  with-body can no longer leave the process stranded in the new directory
  (the original version skipped the restore on error).
  """
  orig_path = os.getcwd()
  os.chdir(path)
  try:
    yield
  finally:
    # Always restore, even if the body raised.
    os.chdir(orig_path)
@gdrivers_util.SkipIfDriverMissing(gdrivers_util.VRT_DRIVER)
@gdrivers_util.SkipIfDriverMissing(gdrivers_util.GTIFF_DRIVER)
class GeolocTest(gdrivers_util.DriverTestCase):
  """Tests warping driven by a geolocation-array VRT (warpsst.vrt)."""

  def setUp(self):
    super(GeolocTest, self).setUp(gdrivers_util.VRT_DRIVER, EXT)

  def testGeoloc01WarpSst(self):
    filepath = gcore_util.GetTestFilePath('warpsst.vrt')
    # Open from the VRT's own directory -- presumably it references its
    # source data by relative path; verify against the fixture.
    with PushDir(os.path.dirname(filepath)):
      self.CheckOpen(filepath)
    self.CheckGeoTransform((-90.30271148, 0.15466423, 0, 33.87552642, 0,
                            -0.15466423))
    # TODO(schwehr): The changing checksum of the band with GDAL updates
    # implies that this test is brittle and needs to be reworked.
    self.CheckBand(1, 62319, gdal.GDT_Int16)
if __name__ == '__main__':
unittest.main()
|
nylas/sync-engine | refs/heads/master | inbox/test/system/conftest.py | 3 | # This file contains pytest fixtures as well as some config
import os
import platform
API_BASE = "http://%s:%s" % (os.getenv("API_PORT_5555_TCP_ADDR", "localhost"), os.getenv("API_PORT_5555_TCP_PORT", "5555"))
TEST_MAX_DURATION_SECS = 360
TEST_GRANULARITY_CHECK_SECS = 0.1
from time import time, sleep
from client import NylasTestClient
from inbox.util.url import provider_from_address
from google_auth_helper import google_auth
from outlook_auth_helper import outlook_auth
from inbox.auth.base import handler_from_provider
# we don't want to commit passwords to the repo.
# load them from an external json file.
try:
from accounts import credentials as raw_credentials
credentials = [(c['user'], c['password']) for c in raw_credentials]
all_accounts = [NylasTestClient(email, API_BASE) for email, _ in credentials]
gmail_accounts = [NylasTestClient(email, API_BASE)
for email, password in credentials
if "gmail.com" in email or
"inboxapp.com" in email]
calendar_providers = ["gmail.com", "onmicrosoft.com"]
calendar_accounts = [NylasTestClient(email, API_BASE)
for email, password in credentials
if any(domain in email for domain in calendar_providers)]
except ImportError:
print ("Error: test accounts file not found. "
"You need to create accounts.py\n"
"File format: credentials = [{'user': 'bill@example.com', "
"'password': 'VerySecret'}]")
raise
def timeout_loop(name):
    """Decorator factory: poll the wrapped predicate until it returns
    truthy or TEST_MAX_DURATION_SECS elapses, then assert success.

    name -- human-readable action description used in messages.
    The wrapped function's first positional argument must be a test
    client (its provider/email_address are used for reporting).
    """
    def wrap(f):
        def wrapped_f(*args, **kwargs):
            client = args[0]
            print "Waiting for: {}...".format(name)
            success = False
            start_time = time()
            while time() - start_time < TEST_MAX_DURATION_SECS:
                if f(*args, **kwargs):
                    success = True
                    break
                # Poll at fine granularity so success is noticed quickly.
                sleep(TEST_GRANULARITY_CHECK_SECS)
            assert success, ("Failed to {} in less than {}s on {}"
                             .format(name, TEST_MAX_DURATION_SECS,
                                     client.email_address))
            # Log how long the condition took to become true.
            format_test_result(name, client.provider,
                               client.email_address, start_time)
            return True
        return wrapped_f
    return wrap
def format_test_result(function_name, provider, email, start_time):
    # Emit one tab-separated timing record: test name, provider, account,
    # and elapsed seconds since start_time.
    print "%s\t%s\t%s\t%f" % (function_name, provider,
                              email, time() - start_time)
def create_account(db_session, email, password):
    """Create, verify and commit a sync account for the given credentials.

    Gmail and Outlook go through their browser-based OAuth helpers (the
    password drives the login flow and yields an auth code); every other
    provider authenticates with the plain password.  Returns the
    committed account object.
    """
    provider = provider_from_address(email)
    auth_handler = handler_from_provider(provider)
    # Special-case Gmail and Outlook, because we need to provide an oauth token
    # and not merely a password.
    response = {'email': email}
    if provider == 'gmail':
        code = google_auth(email, password)
        response = auth_handler._get_authenticated_user(code)
    elif provider == 'outlook':
        code = outlook_auth(email, password)
        response = auth_handler._get_authenticated_user(code)
    else:
        response = {"email": email, "password": password}

    account = auth_handler.create_account(email, response)
    auth_handler.verify_account(account)
    # Disable throttling and pin syncing to this machine for the tests.
    account.throttled = False
    account.sync_host = platform.node()
    account.desired_sync_host = platform.node()
    db_session.add(account)
    db_session.commit()
    return account
|
antoine-de/navitia | refs/heads/dev | source/sql/alembic/versions/11b1fb5bd523_add_boarding_and_alighting_time_to_stop_.py | 8 | """Add boarding and alighting time to stop_time
Revision ID: 11b1fb5bd523
Revises: 4429fe91ac94
Create Date: 2017-02-14 10:46:14.819580
"""
# revision identifiers, used by Alembic.
revision = '11b1fb5bd523'
down_revision = '4429fe91ac94'
from alembic import op
import sqlalchemy as sa
import geoalchemy2 as ga
def upgrade():
    # Add nullable boarding/alighting time columns to navitia.stop_time
    # (nullable so existing rows remain valid without a backfill).
    op.add_column('stop_time', sa.Column('alighting_time', sa.INTEGER(), nullable=True), schema='navitia')
    op.add_column('stop_time', sa.Column('boarding_time', sa.INTEGER(), nullable=True), schema='navitia')
def downgrade():
    # Drop the boarding/alighting columns from navitia.stop_time.
    op.drop_column('stop_time', 'boarding_time', schema='navitia')
    op.drop_column('stop_time', 'alighting_time', schema='navitia')
|
mariosky/evo-drawings | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/gis/db/backends/spatialite/introspection.py | 221 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
from django.utils import six
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
    """
    Subclass that updates the `base_data_types_reverse` dict
    for geometry field types.
    """
    # Copy the parent mapping, then map every OGC geometry type name to
    # Django's generic GeometryField.
    base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
    base_data_types_reverse.update(
        {'point' : 'GeometryField',
         'linestring' : 'GeometryField',
         'polygon' : 'GeometryField',
         'multipoint' : 'GeometryField',
         'multilinestring' : 'GeometryField',
         'multipolygon' : 'GeometryField',
         'geometrycollection' : 'GeometryField',
         })
class SpatiaLiteIntrospection(DatabaseIntrospection):
    # Use the geometry-aware lookup dict for reverse type mapping.
    data_types_reverse = GeoFlexibleFieldLookupDict()

    def get_geometry_type(self, table_name, geo_col):
        """
        Return (field_type, field_params) for geometry column `geo_col`
        of `table_name`, based on SpatiaLite's `geometry_columns`
        metadata table.  Raises Exception when no metadata row exists.
        """
        cursor = self.connection.cursor()
        try:
            # Querying the `geometry_columns` table to get additional metadata.
            cursor.execute('SELECT "coord_dimension", "srid", "type" '
                           'FROM "geometry_columns" '
                           'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
                           (table_name, geo_col))
            row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry column for "%s"."%s"' %
                                (table_name, geo_col))

            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            field_type = OGRGeomType(row[2]).django

            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            if srid != 4326:
                # 4326 (WGS84) is the GeometryField default; only record others.
                field_params['srid'] = srid
            if isinstance(dim, six.string_types) and 'Z' in dim:
                # A 'Z' in the dimension string (e.g. 'XYZ') means 3D.
                field_params['dim'] = 3
        finally:
            # Always release the cursor, even when the lookup fails.
            cursor.close()
        return field_type, field_params
|
timmygee/investorservitude | refs/heads/master | investorservitude/core/urls.py | 1 | from django.conf.urls import url, include
from rest_framework import routers
from rest_framework.authtoken.views import obtain_auth_token
from . import views
# DRF router generates the CRUD routes for the holdings viewset.
router = routers.DefaultRouter()
router.register(r'holdings', views.HoldingViewSet)

# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
    # POST credentials here to obtain a DRF token for API auth.
    url(r'^obtain-auth-token/$', obtain_auth_token),
]
|
jaraco/paramiko | refs/heads/master | paramiko/kex_group14.py | 6 | # Copyright (C) 2013 Torsten Landschoff <torsten@debian.org>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
2048 bit key halves, using a known "p" prime and "g" generator.
"""
from paramiko.kex_group1 import KexGroup1
from hashlib import sha1
class KexGroup14(KexGroup1):
    """Diffie-Hellman key exchange over group 14 (2048-bit MODP).

    Reuses the group 1 implementation; only the prime, name and hash
    parameters differ.
    """
    # 2048-bit MODP prime from http://tools.ietf.org/html/rfc3526#section-3
    P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
    G = 2
    name = 'diffie-hellman-group14-sha1'
    hash_algo = sha1
|
svn2github/gyp | refs/heads/master | pylib/gyp/win_tool.py | 6 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
  """Entry point: dispatch args as a WinTool command; exit on failure."""
  tool = WinTool()
  code = tool.Dispatch(args)
  # Dispatch returns None for success; anything else is a process exit code.
  if code is not None:
    sys.exit(code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
  """Allows to use a unique instance of mspdbsrv.exe per linker instead of a
  shared one.

  Mutates env in place.  A no-op unless args invokes link.exe with an
  /OUT: argument.
  """
  if len(args) < 1:
    raise Exception("Not enough arguments")

  if args[0] != 'link.exe':
    return

  # Use the output filename passed to the linker to generate an endpoint name
  # for mspdbsrv.exe.
  endpoint_name = None
  for arg in args:
    m = _LINK_EXE_OUT_ARG.match(arg)
    if m:
      # Strip non-word characters and append the pid for uniqueness.
      endpoint_name = re.sub(r'\W+', '',
                             '%s_%d' % (m.group('out'), os.getpid()))
      break

  if endpoint_name is None:
    return

  # Adds the appropriate environment variable. This will be read by link.exe
  # to know which instance of mspdbsrv.exe it should connect to (if it's
  # not set then the default endpoint is used).
  env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
  """Dispatches a string command to a method."""
  if not args:
    raise Exception("Not enough arguments")
  # 'recursive-mirror' -> ExecRecursiveMirror, called with the remaining args.
  handler = getattr(self, "Exec%s" % self._CommandifyName(args[0]))
  return handler(*args[1:])
def _CommandifyName(self, name_string):
  """Transforms a tool name like recursive-mirror to RecursiveMirror."""
  # Title-case first ('recursive-mirror' -> 'Recursive-Mirror'), then
  # squeeze out the hyphens.
  titled = name_string.title()
  return titled.replace('-', '')
def _GetEnv(self, arch):
  """Gets the saved environment from a file for a given architecture."""
  # The environment is saved as an "environment block" (see CreateProcess
  # and msvs_emulation for details): NUL-separated KEY=value pairs with
  # two trailing NULs (list terminator plus vs. separator), which are
  # stripped before splitting.
  with open(arch) as f:
    block = f.read()
  pairs = block[:-2].split('\0')
  return dict(pair.split('=', 1) for pair in pairs)
def ExecStamp(self, path):
  """Simple stamp command."""
  # Create/truncate the file by opening for write and closing immediately.
  with open(path, 'w'):
    pass
def ExecRecursiveMirror(self, source, dest):
  """Emulation of rm -rf out && cp -af in out."""
  # Remove any existing destination first so the copy is a true mirror.
  if os.path.exists(dest):
    if os.path.isdir(dest):
      def _on_error(fn, path, excinfo):
        # The operation failed, possibly because the file is set to
        # read-only. If that's why, make it writable and try the op again.
        if not os.access(path, os.W_OK):
          os.chmod(path, stat.S_IWRITE)
        fn(path)
      shutil.rmtree(dest, onerror=_on_error)
    else:
      if not os.access(dest, os.W_OK):
        # Attempt to make the file writable before deleting it.
        os.chmod(dest, stat.S_IWRITE)
      os.unlink(dest)

  # copytree for directories; copy2 (contents + metadata) for single files.
  if os.path.isdir(source):
    shutil.copytree(source, dest)
  else:
    shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
  """Filter diagnostic output from link that looks like:
  ' Creating library ui.dll.lib and object ui.dll.exp'
  This happens when there are exports from the dll or exe.

  Returns the linker's exit code.
  """
  env = self._GetEnv(arch)
  if use_separate_mspdbsrv == 'True':
    self._UseSeparateMspdbsrv(env, args)
  # Normalize the tool path to backslashes; run via the shell with the
  # saved MSVS environment and combined stdout/stderr for filtering.
  link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
                          shell=True,
                          env=env,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
  out, _ = link.communicate()
  for line in out.splitlines():
    if not line.startswith(' Creating library '):
      print line
  return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
                          mt, rc, intermediate_manifest, *manifests):
  """A wrapper for handling creating a manifest resource and then executing
  a link command.

  arch -- saved-environment file for the target architecture
  embed_manifest -- 'True' to compile the merged manifest to a resource
      and link it into the binary
  out -- output binary path
  ldcmd -- the link command line to run
  resname -- manifest resource name ("1" for EXEs, "2" for DLLs)
  mt, rc -- manifest tool and resource compiler executables
  intermediate_manifest -- the linker-generated manifest, used only for
      the consistency check at the end
  manifests -- manifest files to merge

  Returns 1 on manifest mismatch (after deleting the output), else None.
  """
  # The 'normal' way to do manifests is to have link generate a manifest
  # based on gathering dependencies from the object files, then merge that
  # manifest with other manifests supplied as sources, convert the merged
  # manifest to a resource, and then *relink*, including the compiled
  # version of the manifest resource. This breaks incremental linking, and
  # is generally overly complicated. Instead, we merge all the manifests
  # provided (along with one that includes what would normally be in the
  # linker-generated one, see msvs_emulation.py), and include that into the
  # first and only link. We still tell link to generate a manifest, but we
  # only use that to assert that our simpler process did not miss anything.
  variables = {
    'python': sys.executable,
    'arch': arch,
    'out': out,
    'ldcmd': ldcmd,
    'resname': resname,
    'mt': mt,
    'rc': rc,
    'intermediate_manifest': intermediate_manifest,
    'manifests': ' '.join(manifests),
  }
  add_to_ld = ''
  if manifests:
    # Merge all supplied manifests into <out>.manifest.
    subprocess.check_call(
        '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
        '-manifest %(manifests)s -out:%(out)s.manifest' % variables)
    if embed_manifest == 'True':
      # Compile the merged manifest to a resource and add it to the link.
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
          ' %(out)s.manifest.rc %(resname)s' % variables)
      subprocess.check_call(
          '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
          '%(out)s.manifest.rc' % variables)
      add_to_ld = ' %(out)s.manifest.res' % variables
  subprocess.check_call(ldcmd + add_to_ld)

  # Run mt.exe on the theoretically complete manifest we generated, merging
  # it with the one the linker generated to confirm that the linker
  # generated one does not add anything. This is strictly unnecessary for
  # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
  # used in a #pragma comment.
  if manifests:
    # Merge the intermediate one with ours to .assert.manifest, then check
    # that .assert.manifest is identical to ours.
    subprocess.check_call(
        '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
        '-manifest %(out)s.manifest %(intermediate_manifest)s '
        '-out:%(out)s.assert.manifest' % variables)
    assert_manifest = '%(out)s.assert.manifest' % variables
    our_manifest = '%(out)s.manifest' % variables
    # Load and normalize the manifests. mt.exe sometimes removes whitespace,
    # and sometimes doesn't unfortunately.
    with open(our_manifest, 'rb') as our_f:
      with open(assert_manifest, 'rb') as assert_f:
        our_data = our_f.read().translate(None, string.whitespace)
        assert_data = assert_f.read().translate(None, string.whitespace)
    if our_data != assert_data:
      # Mismatch: remove the output so the build fails visibly, dump the
      # three manifests for debugging, and return a failing exit code.
      os.unlink(out)
      def dump(filename):
        sys.stderr.write('%s\n-----\n' % filename)
        with open(filename, 'rb') as f:
          sys.stderr.write(f.read() + '\n-----\n')
      dump(intermediate_manifest)
      dump(our_manifest)
      dump(assert_manifest)
      sys.stderr.write(
          'Linker generated manifest "%s" added to final manifest "%s" '
          '(result in "%s"). '
          'Were /MANIFEST switches used in #pragma statements? ' % (
            intermediate_manifest, our_manifest, assert_manifest))
      return 1
def ExecManifestWrapper(self, arch, *args):
  """Run manifest tool with environment set. Strip out undesirable warning
  (some XML blocks are recognized by the OS loader, but not the manifest
  tool).

  Returns the manifest tool's exit code.
  """
  env = self._GetEnv(arch)
  popen = subprocess.Popen(args, shell=True, env=env,
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  out, _ = popen.communicate()
  # Echo everything except blank lines and the known-benign warning.
  for line in out.splitlines():
    if line and 'manifest authoring warning 81010002' not in line:
      print line
  return popen.returncode
def ExecManifestToRc(self, arch, *args):
  """Creates a resource file pointing a SxS assembly manifest.
  |args| is tuple containing path to resource file, path to manifest file
  and resource name which can be "1" (for executables) or "2" (for DLLs)."""
  manifest_path, resource_path, resource_name = args
  # Resource scripts expect forward slashes in the embedded path.
  manifest_abspath = os.path.abspath(manifest_path).replace('\\', '/')
  rc_text = '#include <windows.h>\n%s RT_MANIFEST "%s"' % (
      resource_name, manifest_abspath)
  with open(resource_path, 'wb') as output:
    output.write(rc_text)
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
                    *flags):
  """Filter noisy filenames output from MIDL compile step that isn't
  quietable via command line flags.
  """
  # Assemble the midl command line from the fixed outputs plus extra flags.
  args = ['midl', '/nologo'] + list(flags) + [
      '/out', outdir,
      '/tlb', tlb,
      '/h', h,
      '/dlldata', dlldata,
      '/iid', iid,
      '/proxy', proxy,
      idl]
  env = self._GetEnv(arch)
  popen = subprocess.Popen(args, shell=True, env=env,
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  out, _ = popen.communicate()
  # Filter junk out of stdout, and write filtered versions. Output we want
  # to filter is pairs of lines that look like this:
  # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
  # objidl.idl
  lines = out.splitlines()
  prefixes = ('Processing ', '64 bit Processing ')
  processing = set(os.path.basename(x)
                   for x in lines if x.startswith(prefixes))
  for line in lines:
    # Keep only lines that are neither "Processing ..." banners nor the
    # bare filename echoes that follow them.
    if not line.startswith(prefixes) and line not in processing:
      print line
  return popen.returncode
def ExecAsmWrapper(self, arch, *args):
  """Filter logo banner from invocations of asm.exe."""
  env = self._GetEnv(arch)
  # MSVS doesn't assemble x64 asm files.
  if arch == 'environment.x64':
    return 0
  popen = subprocess.Popen(args, shell=True, env=env,
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  out, _ = popen.communicate()
  for line in out.splitlines():
    # Drop ml.exe's copyright/banner/progress lines; keep real diagnostics.
    if (not line.startswith('Copyright (C) Microsoft Corporation') and
        not line.startswith('Microsoft (R) Macro Assembler') and
        not line.startswith(' Assembling: ') and
        line):
      print line
  return popen.returncode
def ExecRcWrapper(self, arch, *args):
  """Filter logo banner from invocations of rc.exe. Older versions of RC
  don't support the /nologo flag."""
  env = self._GetEnv(arch)
  popen = subprocess.Popen(args, shell=True, env=env,
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  out, _ = popen.communicate()
  for line in out.splitlines():
    # Suppress the banner rc.exe prints when /nologo is unavailable.
    if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
        not line.startswith('Copyright (C) Microsoft Corporation') and
        line):
      print line
  return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
  """Runs an action command line from a response file using the environment
  for |arch|. If |dir| is supplied, use that as the working directory."""
  env = self._GetEnv(arch)
  # TODO(scottmg): This is a temporary hack to get some specific variables
  # through to actions that are set after gyp-time. http://crbug.com/333738.
  for k, v in os.environ.iteritems():
    if k not in env:
      env[k] = v
  # The entire command line lives in the response file; run it via the shell.
  args = open(rspfile).read()
  dir = dir[0] if dir else None
  return subprocess.call(args, shell=True, env=env, cwd=dir)
def ExecClCompile(self, project_dir, selected_files):
  """Executed by msvs-ninja projects when the 'ClCompile' target is used to
  build selected C/C++ files."""
  # Targets are addressed relative to the build root; ninja's '^^' suffix
  # means "build the outputs of the rule that consumes this file".
  rel_dir = os.path.relpath(project_dir, BASE_DIR)
  targets = [os.path.join(rel_dir, name) + '^^'
             for name in selected_files.split(';')]
  return subprocess.call(['ninja.exe'] + targets, shell=True, cwd=BASE_DIR)
if __name__ == '__main__':
  # Dispatch to main() with everything after the script path and propagate
  # the tool's exit code back to the caller.
  sys.exit(main(sys.argv[1:]))
|
alex/warehouse | refs/heads/master | warehouse/xml.py | 3 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
XML_CSP = {"style-src": ["'unsafe-inline'"]}
|
mancoast/CPythonPyc_test | refs/heads/master | cpython/221_test_sax.py | 3 | # regression test for SAX 2.0
# $Id: test_sax.py,v 1.19 2001/10/24 20:32:02 gvanrossum Exp $
from xml.sax import make_parser, ContentHandler, \
SAXException, SAXReaderNotAvailable, SAXParseException
try:
make_parser()
except SAXReaderNotAvailable:
# don't try to test this module if we cannot create a parser
raise ImportError("no XML parsers available")
from xml.sax.saxutils import XMLGenerator, escape, quoteattr, XMLFilterBase
from xml.sax.expatreader import create_parser
from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl
from cStringIO import StringIO
from test_support import verify, verbose, TestFailed, findfile
import os
# ===== Utilities
# Global pass/fail counters maintained by confirm() and reported at the end.
tests = 0
fails = 0
def confirm(outcome, name):
    """Record and report one named test result.

    outcome -- truthy for a pass, falsy for a failure.
    name    -- label printed next to the result.
    """
    global tests, fails
    tests = tests + 1
    if outcome:
        print "Passed", name
    else:
        print "Failed", name
        fails = fails + 1
def test_make_parser2():
    """Creating parsers several times in a row should succeed; there have
    been regressions where only the first make_parser() call worked."""
    try:
        from xml.sax import make_parser
        p = None
        for _ in range(6):
            p = make_parser()
    except:
        return 0
    else:
        return p
# ===========================================================================
#
# saxutils tests
#
# ===========================================================================
# ===== escape
def test_escape_basic():
    """escape() must entity-encode a lone ampersand.

    The expected literal had been mangled by an HTML entity-unescaping pass
    (it compared escaped output against the raw input); restored here.
    """
    return escape("Donald Duck & Co") == "Donald Duck &amp; Co"
def test_escape_all():
    """escape() must encode '<', '>' and '&' together.

    Expected literal restored after entity-unescaping corruption.
    """
    return escape("<Donald Duck & Co>") == "&lt;Donald Duck &amp; Co&gt;"
def test_escape_extra():
    """A caller-supplied entity map must be applied on top of the defaults.

    Expected literals restored after entity-unescaping corruption: the map
    sends 'å' to '&aring;'.
    """
    return escape("Hei på deg", {"å" : "&aring;"}) == "Hei p&aring; deg"
# ===== quoteattr
def test_quoteattr_basic():
    """quoteattr() wraps in double quotes and escapes markup characters.

    Expected literal restored after entity-unescaping corruption.
    """
    return quoteattr("Donald Duck & Co") == '"Donald Duck &amp; Co"'
def test_single_quoteattr():
    """A value containing double quotes must be wrapped in single quotes."""
    quoted = quoteattr('Includes "double" quotes')
    return quoted == '\'Includes "double" quotes\''
def test_double_quoteattr():
    """A value containing single quotes must be wrapped in double quotes."""
    quoted = quoteattr("Includes 'single' quotes")
    return quoted == "\"Includes 'single' quotes\""
def test_single_double_quoteattr():
    """A value with both quote kinds keeps double-quote wrapping, with the
    inner double quotes written as &quot;.

    Expected literal restored after entity-unescaping corruption (the
    corrupted form was not even valid Python syntax).
    """
    return (quoteattr("Includes 'single' and \"double\" quotes")
            == "\"Includes 'single' and &quot;double&quot; quotes\"")
# ===== make_parser
def test_make_parser():
    """An unknown parser name must fall back to the bundled expat reader."""
    try:
        # Creating a parser should succeed even for a bogus module name.
        return make_parser(['xml.parsers.no_such_parser'])
    except:
        return 0
# ===== XMLGenerator
start = '<?xml version="1.0" encoding="iso-8859-1"?>\n'
def test_xmlgen_basic():
    """An empty document must serialize as a single bare <doc> element."""
    buf = StringIO()
    gen = XMLGenerator(buf)
    gen.startDocument()
    gen.startElement("doc", {})
    gen.endElement("doc")
    gen.endDocument()
    return buf.getvalue() == start + "<doc></doc>"
def test_xmlgen_content():
    """Plain character data must appear verbatim inside the element."""
    buf = StringIO()
    gen = XMLGenerator(buf)
    gen.startDocument()
    gen.startElement("doc", {})
    gen.characters("huhei")
    gen.endElement("doc")
    gen.endDocument()
    return buf.getvalue() == start + "<doc>huhei</doc>"
def test_xmlgen_pi():
    """Processing instructions must be emitted before the root element."""
    buf = StringIO()
    gen = XMLGenerator(buf)
    gen.startDocument()
    gen.processingInstruction("test", "data")
    gen.startElement("doc", {})
    gen.endElement("doc")
    gen.endDocument()
    return buf.getvalue() == start + "<?test data?><doc></doc>"
def test_xmlgen_content_escape():
    """Markup characters in character data must be entity-escaped on output."""
    result = StringIO()
    gen = XMLGenerator(result)
    gen.startDocument()
    gen.startElement("doc", {})
    gen.characters("<huhei&")
    gen.endElement("doc")
    gen.endDocument()
    # Expected literal restored after entity-unescaping corruption: the
    # generator writes '<' as &lt; and '&' as &amp;.
    return result.getvalue() == start + "<doc>&lt;huhei&amp;</doc>"
def test_xmlgen_attr_escape():
    """Quote characters in attribute values must be escaped appropriately."""
    result = StringIO()
    gen = XMLGenerator(result)
    gen.startDocument()
    gen.startElement("doc", {"a": '"'})
    gen.startElement("e", {"a": "'"})
    gen.endElement("e")
    gen.startElement("e", {"a": "'\""})
    gen.endElement("e")
    gen.endElement("doc")
    gen.endDocument()
    # Expected literal restored after entity-unescaping corruption: the
    # mixed-quote value serializes its double quote as &quot; (the corrupted
    # form was not even valid Python syntax).
    return result.getvalue() == start \
           + "<doc a='\"'><e a=\"'\"></e><e a=\"'&quot;\"></e></doc>"
def test_xmlgen_ignorable():
    """Ignorable whitespace must still be written to the output."""
    buf = StringIO()
    gen = XMLGenerator(buf)
    gen.startDocument()
    gen.startElement("doc", {})
    gen.ignorableWhitespace(" ")
    gen.endElement("doc")
    gen.endDocument()
    return buf.getvalue() == start + "<doc> </doc>"
ns_uri = "http://www.python.org/xml-ns/saxtest/"
def test_xmlgen_ns():
    """Qualified and unqualified names must serialize with the ns1 mapping."""
    result = StringIO()
    gen = XMLGenerator(result)
    gen.startDocument()
    gen.startPrefixMapping("ns1", ns_uri)
    gen.startElementNS((ns_uri, "doc"), "ns1:doc", {})
    # add an unqualified name
    gen.startElementNS((None, "udoc"), None, {})
    gen.endElementNS((None, "udoc"), None)
    gen.endElementNS((ns_uri, "doc"), "ns1:doc")
    gen.endPrefixMapping("ns1")
    gen.endDocument()
    return result.getvalue() == start + \
           ('<ns1:doc xmlns:ns1="%s"><udoc></udoc></ns1:doc>' %
                                         ns_uri)
# ===== XMLFilterBase
def test_filter_basic():
    """XMLFilterBase must forward every event to its content handler."""
    buf = StringIO()
    gen = XMLGenerator(buf)
    # 'passthrough' rather than 'filter' to avoid shadowing the builtin.
    passthrough = XMLFilterBase()
    passthrough.setContentHandler(gen)
    passthrough.startDocument()
    passthrough.startElement("doc", {})
    passthrough.characters("content")
    passthrough.ignorableWhitespace(" ")
    passthrough.endElement("doc")
    passthrough.endDocument()
    return buf.getvalue() == start + "<doc>content </doc>"
# ===========================================================================
#
# expatreader tests
#
# ===========================================================================
# ===== XMLReader support
def test_expat_file():
    """Parse test.xml from an open file object; compare canonical output."""
    parser = create_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    f = open(findfile("test"+os.extsep+"xml"))
    try:
        parser.parse(f)
    finally:
        # Close explicitly instead of leaking the handle until GC.
        f.close()
    return result.getvalue() == xml_test_out
# ===== DTDHandler support
class TestDTDHandler:
    """DTD handler that records notation and unparsed-entity declarations."""

    def __init__(self):
        self._notations = []
        self._entities = []

    def notationDecl(self, name, publicId, systemId):
        # Remember each notation as a (name, public-id, system-id) triple.
        self._notations.append((name, publicId, systemId))

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        # Remember each unparsed entity with its notation (ndata) name.
        self._entities.append((name, publicId, systemId, ndata))
def test_expat_dtdhandler():
    """NOTATION/ENTITY declarations must reach the registered DTDHandler."""
    parser = create_parser()
    handler = TestDTDHandler()
    parser.setDTDHandler(handler)
    parser.feed('<!DOCTYPE doc [\n')
    parser.feed('  <!ENTITY img SYSTEM "expat.gif" NDATA GIF>\n')
    parser.feed('  <!NOTATION GIF PUBLIC "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN">\n')
    parser.feed(']>\n')
    parser.feed('<doc></doc>')
    parser.close()
    return handler._notations == [("GIF", "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN", None)] and \
           handler._entities == [("img", None, "expat.gif", "GIF")]
# ===== EntityResolver support
class TestEntityResolver:
    """Entity resolver that answers every request with a fixed tiny document."""

    def resolveEntity(self, publicId, systemId):
        # Serve "<entity/>" regardless of the identifiers requested.
        src = InputSource()
        src.setByteStream(StringIO("<entity/>"))
        return src
def test_expat_entityresolver():
    """External entity references must be resolved via the EntityResolver."""
    parser = create_parser()
    parser.setEntityResolver(TestEntityResolver())
    result = StringIO()
    parser.setContentHandler(XMLGenerator(result))
    parser.feed('<!DOCTYPE doc [\n')
    parser.feed('  <!ENTITY test SYSTEM "whatever">\n')
    parser.feed(']>\n')
    parser.feed('<doc>&test;</doc>')
    parser.close()
    # &test; expands to the resolver's "<entity/>" document.
    return result.getvalue() == start + "<doc><entity></entity></doc>"
# ===== Attributes support
class AttrGatherer(ContentHandler):
    """Content handler that captures the attributes of the last element seen."""

    def startElement(self, name, attrs):
        self._attrs = attrs

    def startElementNS(self, name, qname, attrs):
        self._attrs = attrs
def test_expat_attrs_empty():
    """An element without attributes must yield an empty Attributes object."""
    parser = create_parser()
    gather = AttrGatherer()
    parser.setContentHandler(gather)
    parser.feed("<doc/>")
    parser.close()
    return verify_empty_attrs(gather._attrs)
def test_expat_attrs_wattr():
    """A single attribute must be fully visible through the Attributes API."""
    parser = create_parser()
    gather = AttrGatherer()
    parser.setContentHandler(gather)
    parser.feed("<doc attr='val'/>")
    parser.close()
    return verify_attrs_wattr(gather._attrs)
def test_expat_nsattrs_empty():
    """Namespace-aware parse of an attribute-less element yields empty attrs."""
    # create_parser(1) enables namespace processing.
    parser = create_parser(1)
    gather = AttrGatherer()
    parser.setContentHandler(gather)
    parser.feed("<doc/>")
    parser.close()
    return verify_empty_nsattrs(gather._attrs)
def test_expat_nsattrs_wattr():
    """A namespaced attribute must be addressable by its (uri, local) pair."""
    parser = create_parser(1)
    gather = AttrGatherer()
    parser.setContentHandler(gather)
    parser.feed("<doc xmlns:ns='%s' ns:attr='val'/>" % ns_uri)
    parser.close()
    attrs = gather._attrs
    # Note: expat does not report qnames here, hence getQNames() == [].
    return attrs.getLength() == 1 and \
           attrs.getNames() == [(ns_uri, "attr")] and \
           attrs.getQNames() == [] and \
           len(attrs) == 1 and \
           attrs.has_key((ns_uri, "attr")) and \
           attrs.keys() == [(ns_uri, "attr")] and \
           attrs.get((ns_uri, "attr")) == "val" and \
           attrs.get((ns_uri, "attr"), 25) == "val" and \
           attrs.items() == [((ns_uri, "attr"), "val")] and \
           attrs.values() == ["val"] and \
           attrs.getValue((ns_uri, "attr")) == "val" and \
           attrs[(ns_uri, "attr")] == "val"
# ===== InputSource support
# Canonical expected output for test.xml, read once at import time.  Close
# the file explicitly instead of leaking the handle until GC.
_outf = open(findfile("test"+os.extsep+"xml"+os.extsep+"out"))
xml_test_out = _outf.read()
_outf.close()
def test_expat_inpsource_filename():
    """parse() must accept a plain filename as its input source."""
    parser = create_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    parser.parse(findfile("test"+os.extsep+"xml"))
    return result.getvalue() == xml_test_out
def test_expat_inpsource_sysid():
    """parse() must accept an InputSource that carries only a system id."""
    parser = create_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    parser.parse(InputSource(findfile("test"+os.extsep+"xml")))
    return result.getvalue() == xml_test_out
def test_expat_inpsource_stream():
    """parse() must accept an InputSource wrapping an open byte stream."""
    parser = create_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    inpsrc = InputSource()
    stream = open(findfile("test"+os.extsep+"xml"))
    try:
        inpsrc.setByteStream(stream)
        parser.parse(inpsrc)
    finally:
        # Close the stream once parsing is done instead of leaking it.
        stream.close()
    return result.getvalue() == xml_test_out
# ===== IncrementalParser support
def test_expat_incremental():
    """Feeding the document in pieces must produce the same output."""
    buf = StringIO()
    gen = XMLGenerator(buf)
    parser = create_parser()
    parser.setContentHandler(gen)
    for chunk in ("<doc>", "</doc>"):
        parser.feed(chunk)
    parser.close()
    return buf.getvalue() == start + "<doc></doc>"
def test_expat_incremental_reset():
    """reset() must discard parser state so a new document can be fed."""
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser = create_parser()
    parser.setContentHandler(xmlgen)
    parser.feed("<doc>")
    parser.feed("text")
    # Swap in a fresh output buffer, then restart the parse from scratch.
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    parser.reset()
    parser.feed("<doc>")
    parser.feed("text")
    parser.feed("</doc>")
    parser.close()
    return result.getvalue() == start + "<doc>text</doc>"
# ===== Locator support
def test_expat_locator_noinfo():
    """Without an InputSource there is no system/public id, only positions."""
    sink = XMLGenerator(StringIO())
    parser = create_parser()
    parser.setContentHandler(sink)
    parser.feed("<doc>")
    parser.feed("</doc>")
    parser.close()
    return (parser.getSystemId() is None and
            parser.getPublicId() is None and
            parser.getLineNumber() == 1)
def test_expat_locator_withinfo():
    """After parsing a file the locator must report its system id."""
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser = create_parser()
    parser.setContentHandler(xmlgen)
    parser.parse(findfile("test.xml"))
    return parser.getSystemId() == findfile("test.xml") and \
           parser.getPublicId() is None
# ===========================================================================
#
# error reporting
#
# ===========================================================================
def test_expat_inpsource_location():
    """A parse error must carry the InputSource's system id."""
    parser = create_parser()
    parser.setContentHandler(ContentHandler()) # do nothing
    source = InputSource()
    source.setByteStream(StringIO("<foo bar foobar>")) #ill-formed
    name = "a file name"
    source.setSystemId(name)
    try:
        parser.parse(source)
    except SAXException, e:
        return e.getSystemId() == name
    # NOTE: falls through (returns None, i.e. a failure) if the ill-formed
    # document unexpectedly parses without raising.
def test_expat_incomplete():
    """Parsing a truncated document must raise SAXParseException."""
    parser = create_parser()
    parser.setContentHandler(ContentHandler())  # discard all events
    try:
        parser.parse(StringIO("<foo>"))
    except SAXParseException:
        return 1  # ok, error found
    else:
        return 0
# ===========================================================================
#
# xmlreader tests
#
# ===========================================================================
# ===== AttributesImpl
def verify_empty_attrs(attrs):
    """Exercise the full query API of an empty AttributesImpl object.

    Returns true only if every collection accessor reports "no attributes"
    and every name-based accessor raises KeyError.
    """
    # Each g??k flag records that the corresponding accessor raised KeyError.
    try:
        attrs.getValue("attr")
        gvk = 0
    except KeyError:
        gvk = 1
    try:
        attrs.getValueByQName("attr")
        gvqk = 0
    except KeyError:
        gvqk = 1
    try:
        attrs.getNameByQName("attr")
        gnqk = 0
    except KeyError:
        gnqk = 1
    try:
        attrs.getQNameByName("attr")
        gqnk = 0
    except KeyError:
        gqnk = 1
    try:
        attrs["attr"]
        gik = 0
    except KeyError:
        gik = 1
    return attrs.getLength() == 0 and \
           attrs.getNames() == [] and \
           attrs.getQNames() == [] and \
           len(attrs) == 0 and \
           not attrs.has_key("attr") and \
           attrs.keys() == [] and \
           attrs.get("attrs") is None and \
           attrs.get("attrs", 25) == 25 and \
           attrs.items() == [] and \
           attrs.values() == [] and \
           gvk and gvqk and gnqk and gik and gqnk
def verify_attrs_wattr(attrs):
    """Check every accessor of an Attributes object holding attr='val'."""
    return attrs.getLength() == 1 and \
           attrs.getNames() == ["attr"] and \
           attrs.getQNames() == ["attr"] and \
           len(attrs) == 1 and \
           attrs.has_key("attr") and \
           attrs.keys() == ["attr"] and \
           attrs.get("attr") == "val" and \
           attrs.get("attr", 25) == "val" and \
           attrs.items() == [("attr", "val")] and \
           attrs.values() == ["val"] and \
           attrs.getValue("attr") == "val" and \
           attrs.getValueByQName("attr") == "val" and \
           attrs.getNameByQName("attr") == "attr" and \
           attrs["attr"] == "val" and \
           attrs.getQNameByName("attr") == "attr"
def test_attrs_empty():
    """A directly-constructed empty AttributesImpl must behave as empty."""
    return verify_empty_attrs(AttributesImpl({}))
def test_attrs_wattr():
    """A directly-constructed one-entry AttributesImpl must expose it."""
    return verify_attrs_wattr(AttributesImpl({"attr" : "val"}))
# ===== AttributesImpl
def verify_empty_nsattrs(attrs):
    """Exercise the full query API of an empty AttributesNSImpl object.

    Same contract as verify_empty_attrs, but names are (uri, local) pairs
    and qnames use the 'ns:attr' form.
    """
    # Each g??k flag records that the corresponding accessor raised KeyError.
    try:
        attrs.getValue((ns_uri, "attr"))
        gvk = 0
    except KeyError:
        gvk = 1
    try:
        attrs.getValueByQName("ns:attr")
        gvqk = 0
    except KeyError:
        gvqk = 1
    try:
        attrs.getNameByQName("ns:attr")
        gnqk = 0
    except KeyError:
        gnqk = 1
    try:
        attrs.getQNameByName((ns_uri, "attr"))
        gqnk = 0
    except KeyError:
        gqnk = 1
    try:
        attrs[(ns_uri, "attr")]
        gik = 0
    except KeyError:
        gik = 1
    return attrs.getLength() == 0 and \
           attrs.getNames() == [] and \
           attrs.getQNames() == [] and \
           len(attrs) == 0 and \
           not attrs.has_key((ns_uri, "attr")) and \
           attrs.keys() == [] and \
           attrs.get((ns_uri, "attr")) is None and \
           attrs.get((ns_uri, "attr"), 25) == 25 and \
           attrs.items() == [] and \
           attrs.values() == [] and \
           gvk and gvqk and gnqk and gik and gqnk
def test_nsattrs_empty():
    """A directly-constructed empty AttributesNSImpl must behave as empty."""
    return verify_empty_nsattrs(AttributesNSImpl({}, {}))
def test_nsattrs_wattr():
    """A one-entry AttributesNSImpl must resolve both names and qnames."""
    attrs = AttributesNSImpl({(ns_uri, "attr") : "val"},
                             {(ns_uri, "attr") : "ns:attr"})
    return attrs.getLength() == 1 and \
           attrs.getNames() == [(ns_uri, "attr")] and \
           attrs.getQNames() == ["ns:attr"] and \
           len(attrs) == 1 and \
           attrs.has_key((ns_uri, "attr")) and \
           attrs.keys() == [(ns_uri, "attr")] and \
           attrs.get((ns_uri, "attr")) == "val" and \
           attrs.get((ns_uri, "attr"), 25) == "val" and \
           attrs.items() == [((ns_uri, "attr"), "val")] and \
           attrs.values() == ["val"] and \
           attrs.getValue((ns_uri, "attr")) == "val" and \
           attrs.getValueByQName("ns:attr") == "val" and \
           attrs.getNameByQName("ns:attr") == (ns_uri, "attr") and \
           attrs[(ns_uri, "attr")] == "val" and \
           attrs.getQNameByName((ns_uri, "attr")) == "ns:attr"
# ===== Main program
def make_test_output():
    """Regenerate the expected-output file (test.xml.out) from test.xml."""
    parser = create_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    parser.parse(findfile("test"+os.extsep+"xml"))
    outf = open(findfile("test"+os.extsep+"xml"+os.extsep+"out"), "w")
    try:
        # Ensure the file is closed even if the write fails.
        outf.write(result.getvalue())
    finally:
        outf.close()
# Driver: collect every module-level name, run each test_* callable through
# confirm() in sorted order, then report the totals and fail loudly if any
# test did not pass.
items = locals().items()
items.sort()
for (name, value) in items:
    if name[ : 5] == "test_":
        confirm(value(), name)
print "%d tests, %d failures" % (tests, fails)
if fails != 0:
    raise TestFailed, "%d of %d tests failed" % (fails, tests)
|
pymango/pymango | refs/heads/master | misc/python/mango/math/_rotation.py | 1 |
import scipy as sp
import numpy as np
import mango.mpi as mpi
import math
logger, rootLogger = mpi.getLoggers(__name__)
def rotation_matrix(angle, axis, dim=3, dtype="float64"):
    """
    Returns rotation matrix for specified degree angle and
    coordinate axis of rotation.

    :type angle: :obj:`float`
    :param angle: Angle of rotation in degrees.
    :type axis: :obj:`int`
    :param axis: Index of the axis of rotation (for :samp:`{dim}=3`, :samp:`{axis}=0`
       is the z-axis, :samp:`{axis}=1` is the y-axis and:samp:`{axis}=2`
       is the x-axis.
    :type dim: :obj:`int`
    :param dim: Rotation spatial dimension.
    :rtype: :obj:`numpy.array`
    :return: A :samp:`(dim, dim)` shaped rotation matrix.
    """
    # SciPy's top-level NumPy aliases (sp.eye, sp.zeros, sp.pi, sp.sin,
    # sp.cos) were deprecated and removed in modern SciPy; use numpy.
    I = np.eye(dim, dim, dtype=dtype)
    # u and v span the plane of rotation; the rotation is generated from
    # their outer products (Aguilera-Perez style generalized rotation).
    u = np.zeros((dim, 1), dtype=dtype)
    v = np.zeros((dim, 1), dtype=dtype)
    u[(axis + dim - 2) % dim] = 1
    v[(axis + dim - 1) % dim] = 1

    theta = np.pi / 180. * angle
    R = I + np.sin(theta) * (v.dot(u.T) - u.dot(v.T)) \
          + (np.cos(theta) - 1) * (u.dot(u.T) + v.dot(v.T))
    rootLogger.debug("R = %s" % str(R))
    return R
def axis_angle_to_rotation_matrix(direction, angle):
    """
    Convert 3D axis and angle of rotation to 3x3 rotation matrix.

    :type direction: 3 sequence of :obj:`float`
    :param direction: Axis of rotation (any 3-sequence; need not be a
       :obj:`numpy.ndarray` and need not be normalised).
    :type angle: :obj:`float`
    :param angle: Radian angle of rotation about axis.
    :rtype: :obj:`numpy.array`
    :return: 3x3 rotation matrix.
    """
    # Copy to a float array.  The previous np.array(direction,
    # dtype=direction.dtype) failed for plain sequences (no .dtype) and
    # the in-place /= below failed for integer arrays.
    d = np.array(direction)
    if not np.issubdtype(d.dtype, np.floating):
        d = d.astype("float64")
    eye = np.eye(3, 3, dtype=d.dtype)
    mtx = eye
    dNorm = np.linalg.norm(d)
    if ((angle != 0) and (dNorm > 0)):
        d /= dNorm
        ddt = np.outer(d, d)
        # Skew-symmetric cross-product matrix of d.
        skew = np.array([[    0,  d[2], -d[1]],
                         [-d[2],     0,  d[0]],
                         [ d[1], -d[0],     0]], dtype=d.dtype).T
        # Rodrigues' rotation formula.
        mtx = ddt + np.cos(angle) * (eye - ddt) + np.sin(angle) * skew
    return mtx
def axis_angle_from_rotation_matrix(rm):
    """
    Converts 3x3 rotation matrix to axis and angle representation.

    :type rm: 3x3 :obj:`float` matrix
    :param rm: Rotation matrix.
    :rtype: :obj:`tuple`
    :return: :samp:`(axis, radian_angle)` pair (angle in radians).
    """
    # Tolerance for deciding whether cos(theta) sits at the +/-1 boundary.
    eps = (16 * np.finfo(rm.dtype).eps)
    aa = np.array((0, 0, 1), dtype=rm.dtype)  # default axis (theta == 0)
    theta = aa[0]
    # cos(theta) from the trace identity; clamp against round-off.
    c = (np.trace(rm) - 1) / 2
    if (c > 1):
        c = 1
    if (c < -1):
        c = -1
    if (math.fabs(math.fabs(c) - 1) >= eps):
        # Generic case: the axis comes from the antisymmetric part of rm.
        theta = math.acos(c)
        s = math.sqrt(1 - c * c)
        inv2s = 1 / (2 * s)
        aa[0] = inv2s * (rm[2, 1] - rm[1, 2])
        aa[1] = inv2s * (rm[0, 2] - rm[2, 0])
        aa[2] = inv2s * (rm[1, 0] - rm[0, 1])
    elif (c >= 0):
        # Identity rotation: keep the default axis with zero angle.
        theta = 0
    else:
        # theta == pi: any non-zero column of rm + I is parallel to the
        # axis; normalize the first one found.  (The previous code called
        # an undefined helper `col(rmI, i)` here.)
        rmI = (rm + np.eye(3, 3, dtype=rm.dtype))
        theta = np.pi
        for i in range(0, 3):
            n2 = np.linalg.norm(rmI[:, i])
            if (n2 > 0):
                aa = rmI[:, i] / n2
                break
    return aa, theta
def rotation_matrix_from_cross_prod(a, b):
    """
    Returns the rotation matrix which rotates the
    vector :samp:`a` onto the the vector :samp:`b`.

    :type a: 3 sequence of :obj:`float`
    :param a: Vector to be rotated on to :samp:`{b}`.
    :type b: 3 sequence of :obj:`float`
    :param b: Vector.
    :rtype: :obj:`numpy.array`
    :return: 3D rotation matrix.
    """
    # sp.sqrt/sp.finfo/sp.eye/sp.arctan2 were removed from modern SciPy's
    # top-level namespace; use the numpy equivalents.
    crs = np.cross(a, b)
    dotProd = np.dot(a, b)
    crsNorm = np.linalg.norm(crs)
    # Below this tolerance the vectors are treated as (anti-)parallel.
    eps = np.sqrt(np.finfo(a.dtype).eps)
    r = np.eye(a.size, a.size, dtype=a.dtype)
    if (crsNorm > eps):
        theta = np.arctan2(crsNorm, dotProd)
        r = axis_angle_to_rotation_matrix(crs, theta)
    elif (dotProd < 0):
        # Anti-parallel vectors: point reflection through the origin.
        r = -r
    return r
def axis_angle_from_cross_prod(a, b):
    """
    Returns the :samp:`(axis, radian_angle)` rotation which rotates the
    vector :samp:`a` onto the the vector :samp:`b`.

    :type a: 3 sequence of :obj:`float`
    :param a: Vector to be rotated on to :samp:`{b}`.
    :type b: 3 sequence of :obj:`float`
    :param b: Vector.
    :rtype: :obj:`tuple`
    :return: :samp:`(axis, radian_angle)` pair (angle in radians).
    """
    # sp.arctan2 was removed from modern SciPy's top-level namespace; use
    # numpy for both the norm and the angle.
    crs = np.cross(a, b)
    dotProd = np.dot(a, b)
    crsNorm = np.linalg.norm(crs)
    # atan2(|a x b|, a . b) is numerically robust for all angles.
    theta = np.arctan2(crsNorm, dotProd)
    return crs, theta
|
Ecotrust/marineplanner-core | refs/heads/master | marineplanner/core/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
gclenaghan/scikit-learn | refs/heads/master | sklearn/grid_search.py | 4 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.19.",
DeprecationWarning)
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.

    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.

        An empty dict signifies default parameters.

        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.

    Examples
    --------
    >>> from sklearn.grid_search import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True

    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True

    See also
    --------
    :class:`GridSearchCV`:
        uses ``ParameterGrid`` to perform a full parallelized parameter search.
    """

    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid

    def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params

    def __len__(self):
        """Number of points on the grid."""
        # Product function that can handle iterables (np.product can't).
        product = partial(reduce, operator.mul)
        return sum(product(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)

    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration

        Parameters
        ----------
        ind : int
            The iteration index

        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                    continue

            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            # np.product was removed in NumPy 2.0; np.prod is the
            # canonical spelling.
            total = np.prod(sizes)

            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out

        raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
    """Generator on parameters sampled from given distributions.

    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.

    Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
    a custom RNG instance and always use the singleton RNG from
    ``numpy.random``. Hence setting ``random_state`` will not guarantee a
    deterministic iteration whenever ``scipy.stats`` distributions are used to
    define the parameter search space.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_distributions : dict
        Dictionary where the keys are parameters and values
        are distributions from which a parameter is to be sampled.
        Distributions either have to provide a ``rvs`` function
        to sample from them, or can be given as a list of values,
        where a uniform distribution is assumed.

    n_iter : integer
        Number of parameter settings that are produced.

    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.

    Returns
    -------
    params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        as sampled value.

    Examples
    --------
    >>> from sklearn.grid_search import ParameterSampler
    >>> from scipy.stats.distributions import expon
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> param_grid = {'a':[1, 2], 'b': expon()}
    >>> param_list = list(ParameterSampler(param_grid, n_iter=4))
    >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
    ...                 for d in param_list]
    >>> rounded_list == [{'b': 0.89856, 'a': 1},
    ...                  {'b': 0.923223, 'a': 1},
    ...                  {'b': 1.878964, 'a': 2},
    ...                  {'b': 1.038159, 'a': 2}]
    True
    """
    def __init__(self, param_distributions, n_iter, random_state=None):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state

    def __iter__(self):
        # check if all distributions are given as lists
        # in this case we want to sample without replacement
        all_lists = np.all([not hasattr(v, "rvs")
                            for v in self.param_distributions.values()])
        rnd = check_random_state(self.random_state)

        if all_lists:
            # look up sampled parameter settings in parameter grid
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)

            if grid_size < self.n_iter:
                raise ValueError(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d." % (grid_size, self.n_iter)
                    + " For exhaustive searches, use GridSearchCV.")
            # Draw n_iter distinct grid indices, then map them to settings.
            for i in sample_without_replacement(grid_size, self.n_iter,
                                                random_state=rnd):
                yield param_grid[i]

        else:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(self.param_distributions.items())
            for _ in six.moves.range(self.n_iter):
                params = dict()
                for k, v in items:
                    if hasattr(v, "rvs"):
                        # scipy-style frozen distribution: draw a sample.
                        params[k] = v.rvs()
                    else:
                        # Plain list: sample uniformly with replacement.
                        params[k] = v[rnd.randint(len(v))]
                yield params

    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
                   verbose, error_score='raise', **fit_params):
    """Run fit on one set of parameters.

    Parameters
    ----------
    X : array-like, sparse matrix or list
        Input data.

    y : array-like or None
        Targets for input data.

    estimator : estimator object
        A object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.

    parameters : dict
        Parameters to be set on estimator for this grid point.

    train : ndarray, dtype int or bool
        Boolean mask or indices for training set.

    test : ndarray, dtype int or bool
        Boolean mask or indices for test set.

    scorer : callable or None.
        If provided must be a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    verbose : int
        Verbosity level.

    **fit_params : kwargs
        Additional parameter passed to the fit function of the estimator.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Returns
    -------
    score : float
        Score of this parameter setting on given training / test split.

    parameters : dict
        The parameters that have been evaluated.

    n_samples_test : int
        Number of test samples in this split.
    """
    # Delegate the actual clone/set_params/fit/score work to the shared
    # cross-validation helper; the fitted estimator itself is discarded.
    score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
                                              test, verbose, parameters,
                                              fit_params, error_score)
    return score, parameters, n_samples_test
def _check_param_grid(param_grid):
    """Validate that every grid value is a non-empty 1-D sequence."""
    # A single dict is shorthand for a one-element list of grids.
    if hasattr(param_grid, 'items'):
        param_grid = [param_grid]
    for grid in param_grid:
        for value in grid.values():
            if isinstance(value, np.ndarray) and value.ndim > 1:
                raise ValueError("Parameter array should be one-dimensional.")
            if not isinstance(value, (list, tuple, np.ndarray)):
                raise ValueError("Parameter values should be a list.")
            if len(value) == 0:
                raise ValueError("Parameter values should be a non-empty "
                                 "list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
                                ('parameters',
                                 'mean_validation_score',
                                 'cv_validation_scores'))):
    # This subclass exists only to attach a friendlier __repr__ to the raw
    # namedtuple.  Declaring an empty __slots__ keeps instances free of a
    # per-instance __dict__, so the namedtuple's compact memory layout (no
    # per-instance copy of the key strings) is preserved.
    __slots__ = ()

    def __repr__(self):
        """Summarize mean/std of the CV scores and the parameter setting."""
        return "mean: %.5f, std: %.5f, params: %s" % (
            self.mean_validation_score,
            np.std(self.cv_validation_scores),
            self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
                                      MetaEstimatorMixin)):
    """Base class for hyper parameter search with cross-validation."""

    @abstractmethod
    def __init__(self, estimator, scoring=None,
                 fit_params=None, n_jobs=1, iid=True,
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score='raise'):
        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.fit_params = fit_params if fit_params is not None else {}
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score

    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator's type so meta-tools treat the search
        # object like the underlying estimator.
        return self.estimator._estimator_type

    def score(self, X, y=None):
        """Returns the score on the given data, if the estimator has been refit.
        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        Returns
        -------
        score : float
        Notes
        -----
        * The long-standing behavior of this method changed in version 0.16.
        * It no longer uses the metric provided by ``estimator.score`` if the
          ``scoring`` parameter was set when fitting.
        """
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        # Warn when both a scoring parameter and an estimator score method
        # exist: the scoring parameter now wins (behavior change in 0.16).
        if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
            warnings.warn("The long-standing behavior to use the estimator's "
                          "score function in {0}.score has changed. The "
                          "scoring parameter is now used."
                          "".format(self.__class__.__name__),
                          ChangedBehaviorWarning)
        return self.scorer_(self.best_estimator_, X, y)

    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Call predict on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict(X)

    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        """Call predict_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_proba(X)

    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        """Call predict_log_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_log_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_log_proba(X)

    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        """Call decision_function on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``decision_function``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.decision_function(X)

    @if_delegate_has_method(delegate='estimator')
    def transform(self, X):
        """Call transform on the estimator with the best found parameters.
        Only available if the underlying estimator supports ``transform`` and
        ``refit=True``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.transform(X)

    @if_delegate_has_method(delegate='estimator')
    def inverse_transform(self, Xt):
        """Call inverse_transform on the estimator with the best found parameters.
        Only available if the underlying estimator implements ``inverse_transform`` and
        ``refit=True``.
        Parameters
        -----------
        Xt : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        # Bug fix: this previously delegated to ``transform`` instead of
        # ``inverse_transform``, so Xt was transformed forward again.
        return self.best_estimator_.inverse_transform(Xt)

    def _fit(self, X, y, parameter_iterable):
        """Actual fitting, performing the search over parameters."""
        estimator = self.estimator
        cv = self.cv
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        X, y = indexable(X, y)
        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)'
                                 % (len(y), n_samples))
        cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
        if self.verbose > 0:
            if isinstance(parameter_iterable, Sized):
                n_candidates = len(parameter_iterable)
                print("Fitting {0} folds for each of {1} candidates, totalling"
                      " {2} fits".format(len(cv), n_candidates,
                                         n_candidates * len(cv)))
        base_estimator = clone(self.estimator)
        pre_dispatch = self.pre_dispatch
        # One parallel task per (candidate, fold) pair; tasks are grouped by
        # candidate, i.e. the n_folds tasks for one candidate are contiguous.
        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=pre_dispatch
        )(
            delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                    train, test, self.verbose, parameters,
                                    self.fit_params, return_parameters=True,
                                    error_score=self.error_score)
            for parameters in parameter_iterable
            for train, test in cv)
        # out is a list of 4-tuples: (score, n_test_samples, <ignored>,
        # parameters).  (The old comment claimed triplets, which was wrong.)
        n_fits = len(out)
        n_folds = len(cv)
        scores = list()
        grid_scores = list()
        for grid_start in range(0, n_fits, n_folds):
            n_test_samples = 0
            score = 0
            all_scores = []
            for this_score, this_n_test_samples, _, parameters in \
                    out[grid_start:grid_start + n_folds]:
                all_scores.append(this_score)
                if self.iid:
                    # iid: weight each fold's score by its test-set size so
                    # the aggregate is a per-sample mean.
                    this_score *= this_n_test_samples
                    n_test_samples += this_n_test_samples
                score += this_score
            if self.iid:
                score /= float(n_test_samples)
            else:
                score /= float(n_folds)
            scores.append((score, parameters))
            # TODO: shall we also store the test_fold_sizes?
            grid_scores.append(_CVScoreTuple(
                parameters,
                score,
                np.array(all_scores)))
        # Store the computed scores
        self.grid_scores_ = grid_scores
        # Find the best parameters by comparing on the mean validation score:
        # note that `sorted` is deterministic in the way it breaks ties
        best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                      reverse=True)[0]
        self.best_params_ = best.parameters
        self.best_score_ = best.mean_validation_score
        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best.parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.
    Important members are fit, predict.
    GridSearchCV implements a "fit" and a "score" method.
    It also implements "predict", "predict_proba", "decision_function",
    "transform" and "inverse_transform" if they are implemented in the
    estimator used.
    The parameters of the estimator used to apply these methods are optimized
    by cross-validated grid-search over a parameter grid.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    estimator : estimator object.
        A object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    param_grid : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.
    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default=1
        Number of jobs to run in parallel.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
            - None, to use the default 3-fold cross-validation,
            - integer, to specify the number of folds.
            - An object to be used as a cross-validation generator.
            - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this GridSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Examples
    --------
    >>> from sklearn import svm, grid_search, datasets
    >>> iris = datasets.load_iris()
    >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
    >>> svr = svm.SVC()
    >>> clf = grid_search.GridSearchCV(svr, parameters)
    >>> clf.fit(iris.data, iris.target)
    ...                             # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    GridSearchCV(cv=None, error_score=...,
           estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
                         decision_function_shape=None, degree=..., gamma=...,
                         kernel='rbf', max_iter=-1, probability=False,
                         random_state=None, shrinking=True, tol=...,
                         verbose=False),
           fit_params={}, iid=..., n_jobs=1,
           param_grid=..., pre_dispatch=..., refit=...,
           scoring=..., verbose=...)
    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:
            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.
    Notes
    -----
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    --------
    :class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
    :func:`sklearn.cross_validation.train_test_split`:
        utility function to split the data into a development set usable
        for fitting a GridSearchCV instance and an evaluation set for
        its final evaluation.
    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise'):
        super(GridSearchCV, self).__init__(
            estimator, scoring, fit_params, n_jobs, iid,
            refit, cv, verbose, pre_dispatch, error_score)
        self.param_grid = param_grid
        # Validate eagerly so malformed grids fail at construction time
        # rather than deep inside a parallel fit.
        _check_param_grid(param_grid)
    def fit(self, X, y=None):
        """Run fit with all sets of parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        Returns
        -------
        self : GridSearchCV
            This instance, with ``grid_scores_``/``best_*`` attributes set.
        """
        return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
    """Randomized search on hyper parameters.
    RandomizedSearchCV implements a "fit" and a "score" method.
    It also implements "predict", "predict_proba", "decision_function",
    "transform" and "inverse_transform" if they are implemented in the
    estimator used.
    The parameters of the estimator used to apply these methods are optimized
    by cross-validated search over parameter settings.
    In contrast to GridSearchCV, not all parameter values are tried out, but
    rather a fixed number of parameter settings is sampled from the specified
    distributions. The number of parameter settings that are tried is
    given by n_iter.
    If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.
    Read more in the :ref:`User Guide <randomized_parameter_search>`.
    Parameters
    ----------
    estimator : estimator object.
        A object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    param_distributions : dict
        Dictionary with parameters names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.
    n_iter : int, default=10
        Number of parameter settings that are sampled. n_iter trades
        off runtime vs quality of the solution.
    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default=1
        Number of jobs to run in parallel.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
            - None, to use the default 3-fold cross-validation,
            - integer, to specify the number of folds.
            - An object to be used as a cross-validation generator.
            - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this RandomizedSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:
            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    --------
    :class:`GridSearchCV`:
        Does exhaustive search over a grid of parameters.
    :class:`ParameterSampler`:
        A generator over parameter settings, constructed from
        param_distributions.
    """
    def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
                 fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise'):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
        super(RandomizedSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)
    def fit(self, X, y=None):
        """Run fit on the estimator with randomly drawn parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        # Draw n_iter candidate settings up front, then reuse the generic
        # candidate-evaluation loop from BaseSearchCV._fit.
        sampled_params = ParameterSampler(self.param_distributions,
                                          self.n_iter,
                                          random_state=self.random_state)
        return self._fit(X, y, sampled_params)
|
hashems/Mobile-Cloud-Development-Projects | refs/heads/master | appengine/standard/images/api/main.py | 9 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample application that demonstrates how to use the App Engine Images API.
For more information, see README.md.
"""
# [START all]
# [START thumbnailer]
from google.appengine.api import images
from google.appengine.ext import ndb
import webapp2
class Photo(ndb.Model):
    # Datastore entity for one uploaded photo.
    title = ndb.StringProperty()  # human-readable caption
    full_size_image = ndb.BlobProperty()  # original image bytes, unresized
class Thumbnailer(webapp2.RequestHandler):
    """Serves an 80x100 "I'm feeling lucky" JPEG thumbnail of a Photo."""

    def get(self):
        photo_key = self.request.get("id")
        photo = Photo.get_by_id(int(photo_key)) if photo_key else None
        if not photo:
            # Either "id" wasn't provided, or there was no image with
            # that ID in the datastore.
            self.error(404)
            return
        thumb = images.Image(photo.full_size_image)
        thumb.resize(width=80, height=100)
        thumb.im_feeling_lucky()
        rendered = thumb.execute_transforms(output_encoding=images.JPEG)
        self.response.headers['Content-Type'] = 'image/jpeg'
        self.response.out.write(rendered)
# [END thumbnailer]
# WSGI application: routes /img to the thumbnail handler; debug=True shows
# tracebacks in the development server.
app = webapp2.WSGIApplication([('/img', Thumbnailer)], debug=True)
# [END all]
|
hashamali/pyScss | refs/heads/master | scss/grammar/expression.py | 3 | """Grammar for parsing Sass expressions."""
# This is a GENERATED FILE -- DO NOT EDIT DIRECTLY!
# Edit scss/grammar/expression.g, then run:
#
# python2 yapps2.py scss/grammar/expression.g
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import operator
import re
from scss.ast import Parentheses
from scss.ast import UnaryOp
from scss.ast import BinaryOp
from scss.ast import AnyOp
from scss.ast import AllOp
from scss.ast import NotOp
from scss.ast import CallOp
from scss.ast import Interpolation
from scss.ast import Literal
from scss.ast import Variable
from scss.ast import ListLiteral
from scss.ast import MapLiteral
from scss.ast import ArgspecLiteral
from scss.ast import FunctionLiteral
from scss.ast import AlphaFunctionLiteral
from scss.cssdefs import unescape
from scss.types import Color
from scss.types import Function
from scss.types import Number
from scss.types import String
from scss.types import Url
from scss.grammar import Parser
from scss.grammar import Scanner
class SassExpressionScanner(Scanner):
    """Tokenizer for Sass expressions.

    yapps-generated (see the header of this file): edit the grammar, not
    this class.  ``_patterns`` maps token names to regex source strings;
    compiled patterns are cached on the class in ``patterns``.
    """
    patterns = None
    _patterns = [
        ('"="', '='),
        ('"opacity"', 'opacity'),
        ('":"', ':'),
        ('","', ','),
        ('SINGLE_STRING_GUTS', "([^'\\\\#]|[\\\\].|#(?![{]))*"),
        ('DOUBLE_STRING_GUTS', '([^"\\\\#]|[\\\\].|#(?![{]))*'),
        ('INTERP_ANYTHING', '([^#]|#(?![{]))*'),
        ('INTERP_NO_VARS', '([^#$]|#(?![{]))*'),
        ('INTERP_NO_PARENS', '([^#()]|#(?![{]))*'),
        ('INTERP_START_URL_HACK', '(?=[#][{])'),
        ('INTERP_START', '#[{]'),
        ('SPACE', '[ \r\t\n]+'),
        ('[ \r\t\n]+', '[ \r\t\n]+'),
        ('LPAR', '\\(|\\['),
        ('RPAR', '\\)|\\]'),
        ('END', '$'),
        ('MUL', '[*]'),
        ('DIV', '/'),
        ('MOD', '(?<=\\s)%'),
        ('ADD', '[+]'),
        ('SUB', '-\\s'),
        ('SIGN', '-(?![a-zA-Z_])'),
        ('AND', '(?<![-\\w])and(?![-\\w])'),
        ('OR', '(?<![-\\w])or(?![-\\w])'),
        ('NOT', '(?<![-\\w])not(?![-\\w])'),
        ('NE', '!='),
        ('INV', '!'),
        ('EQ', '=='),
        ('LE', '<='),
        ('GE', '>='),
        ('LT', '<'),
        ('GT', '>'),
        ('DOTDOTDOT', '[.]{3}'),
        ('SINGLE_QUOTE', "'"),
        ('DOUBLE_QUOTE', '"'),
        ('BAREURL_HEAD_HACK', '((?:[\\\\].|[^#$\'"()\\x00-\\x08\\x0b\\x0e-\\x20\\x7f]|#(?![{]))+)(?=#[{]|\\s*[)])'),
        ('BAREURL', '(?:[\\\\].|[^#$\'"()\\x00-\\x08\\x0b\\x0e-\\x20\\x7f]|#(?![{]))+'),
        ('UNITS', '(?<!\\s)(?:[a-zA-Z]+|%)(?![-\\w])'),
        ('NUM', '(?:\\d+(?:\\.\\d*)?|\\.\\d+)'),
        ('COLOR', '#(?:[a-fA-F0-9]{6}|[a-fA-F0-9]{3})(?![a-fA-F0-9])'),
        ('KWVAR', '\\$[-a-zA-Z0-9_]+(?=\\s*:)'),
        ('SLURPYVAR', '\\$[-a-zA-Z0-9_]+(?=[.][.][.])'),
        ('VAR', '\\$[-a-zA-Z0-9_]+'),
        ('LITERAL_FUNCTION', '(calc|expression|progid:[\\w.]+)(?=[(])'),
        ('ALPHA_FUNCTION', 'alpha(?=[(])'),
        ('URL_FUNCTION', 'url(?=[(])'),
        ('FNCT', '[-a-zA-Z_][-a-zA-Z0-9_]*(?=\\()'),
        ('BAREWORD', '(?!\\d)(\\\\[0-9a-fA-F]{1,6}|\\\\.|[-a-zA-Z0-9_])+'),
        ('BANG_IMPORTANT', '!\\s*important'),
        ('INTERP_END', '[}]'),
    ]
    def __init__(self, input=None):
        # Prefer the newer Scanner hook if the base class provides it;
        # otherwise compile and cache the patterns on the class once.
        if hasattr(self, 'setup_patterns'):
            self.setup_patterns(self._patterns)
        elif self.patterns is None:
            self.__class__.patterns = []
            for t, p in self._patterns:
                self.patterns.append((t, re.compile(p)))
        super(SassExpressionScanner, self).__init__(None, ['[ \r\t\n]+'], input)
class SassExpression(Parser):
def goal(self):
    """Parse the entire input as a comma-separated expression list."""
    expr_lst = self.expr_lst()
    END = self._scan('END')
    return expr_lst
def goal_argspec(self):
    """Parse the entire input as a function/mixin argument spec."""
    argspec = self.argspec()
    END = self._scan('END')
    return argspec
def argspec(self):
    """Parse an argument spec: possibly empty, possibly only a slurpy arg."""
    _token_ = self._peek(self.argspec_rsts)
    if _token_ not in self.argspec_chks:
        if self._peek(self.argspec_rsts_) not in self.argspec_chks_:
            argspec_items = self.argspec_items()
            args, slurpy = argspec_items
            return ArgspecLiteral(args, slurp=slurpy)
        return ArgspecLiteral([])
    elif _token_ == 'SLURPYVAR':
        SLURPYVAR = self._scan('SLURPYVAR')
        DOTDOTDOT = self._scan('DOTDOTDOT')
        return ArgspecLiteral([], slurp=SLURPYVAR)
    else: # == 'DOTDOTDOT'
        DOTDOTDOT = self._scan('DOTDOTDOT')
        return ArgspecLiteral([], slurp=all)
def argspec_items(self):
    """Parse one or more argspec items; returns (args, slurpy-or-None)."""
    slurpy = None
    argspec_item = self.argspec_item()
    args = [argspec_item]
    if self._peek(self.argspec_items_rsts) == '","':
        self._scan('","')
        if self._peek(self.argspec_items_rsts_) not in self.argspec_chks_:
            _token_ = self._peek(self.argspec_items_rsts__)
            if _token_ == 'SLURPYVAR':
                SLURPYVAR = self._scan('SLURPYVAR')
                DOTDOTDOT = self._scan('DOTDOTDOT')
                slurpy = SLURPYVAR
            elif _token_ == 'DOTDOTDOT':
                DOTDOTDOT = self._scan('DOTDOTDOT')
                slurpy = all
            else: # in self.argspec_items_chks
                # More regular items follow; recurse for the tail.
                argspec_items = self.argspec_items()
                more_args, slurpy = argspec_items
                args.extend(more_args)
    return args, slurpy
def argspec_item(self):
    """Parse one argument: a ``$kw: value`` pair or a bare expression."""
    _token_ = self._peek(self.argspec_items_chks)
    if _token_ == 'KWVAR':
        KWVAR = self._scan('KWVAR')
        self._scan('":"')
        expr_slst = self.expr_slst()
        return (Variable(KWVAR), expr_slst)
    else: # in self.argspec_item_chks
        expr_slst = self.expr_slst()
        return (None, expr_slst)
def expr_map_or_list(self):
    """Parse either a map (``key: value, ...``) or a comma list.

    The first ``:`` after the first expression decides which form it is.
    """
    expr_slst = self.expr_slst()
    first = expr_slst
    _token_ = self._peek(self.expr_map_or_list_rsts)
    if _token_ == '":"':
        self._scan('":"')
        expr_slst = self.expr_slst()
        pairs = [(first, expr_slst)]
        while self._peek(self.expr_map_or_list_rsts_) == '","':
            self._scan('","')
            map_item = None, None
            if self._peek(self.expr_map_or_list_rsts__) not in self.expr_map_or_list_rsts_:
                map_item = self.map_item()
            pairs.append(map_item)
        return MapLiteral(pairs)
    else: # in self.expr_map_or_list_rsts_
        items = [first]; use_list = False
        while self._peek(self.expr_map_or_list_rsts_) == '","':
            self._scan('","')
            use_list = True
            expr_slst = self.expr_slst()
            items.append(expr_slst)
        return ListLiteral(items) if use_list else items[0]
def map_item(self):
    """Parse a single ``key: value`` pair of a map literal."""
    expr_slst = self.expr_slst()
    left = expr_slst
    self._scan('":"')
    expr_slst = self.expr_slst()
    return (left, expr_slst)
def expr_lst(self):
    """Parse a comma-separated list; a single element is returned bare."""
    expr_slst = self.expr_slst()
    v = [expr_slst]
    while self._peek(self.expr_lst_rsts) == '","':
        self._scan('","')
        expr_slst = self.expr_slst()
        v.append(expr_slst)
    return ListLiteral(v) if len(v) > 1 else v[0]
def expr_slst(self):
    """Parse a space-separated list; a single element is returned bare."""
    or_expr = self.or_expr()
    v = [or_expr]
    while self._peek(self.expr_slst_rsts) not in self.expr_slst_chks:
        or_expr = self.or_expr()
        v.append(or_expr)
    return ListLiteral(v, comma=False) if len(v) > 1 else v[0]
def or_expr(self):
    """Parse left-associative ``or`` chains (folded into AnyOp)."""
    and_expr = self.and_expr()
    v = and_expr
    while self._peek(self.or_expr_rsts) == 'OR':
        OR = self._scan('OR')
        and_expr = self.and_expr()
        v = AnyOp(v, and_expr)
    return v
def and_expr(self):
    """Parse left-associative ``and`` chains (folded into AllOp)."""
    not_expr = self.not_expr()
    v = not_expr
    while self._peek(self.and_expr_rsts) == 'AND':
        AND = self._scan('AND')
        not_expr = self.not_expr()
        v = AllOp(v, not_expr)
    return v
def not_expr(self):
    """Parse unary ``not`` (right-recursive, so ``not not x`` nests)."""
    _token_ = self._peek(self.argspec_item_chks)
    if _token_ != 'NOT':
        comparison = self.comparison()
        return comparison
    else: # == 'NOT'
        NOT = self._scan('NOT')
        not_expr = self.not_expr()
        return NotOp(not_expr)
def comparison(self):
    """Parse left-associative comparison chains (<, >, <=, >=, ==, !=)."""
    a_expr = self.a_expr()
    v = a_expr
    while self._peek(self.comparison_rsts) in self.comparison_chks:
        _token_ = self._peek(self.comparison_chks)
        if _token_ == 'LT':
            LT = self._scan('LT')
            a_expr = self.a_expr()
            v = BinaryOp(operator.lt, v, a_expr)
        elif _token_ == 'GT':
            GT = self._scan('GT')
            a_expr = self.a_expr()
            v = BinaryOp(operator.gt, v, a_expr)
        elif _token_ == 'LE':
            LE = self._scan('LE')
            a_expr = self.a_expr()
            v = BinaryOp(operator.le, v, a_expr)
        elif _token_ == 'GE':
            GE = self._scan('GE')
            a_expr = self.a_expr()
            v = BinaryOp(operator.ge, v, a_expr)
        elif _token_ == 'EQ':
            EQ = self._scan('EQ')
            a_expr = self.a_expr()
            v = BinaryOp(operator.eq, v, a_expr)
        else: # == 'NE'
            NE = self._scan('NE')
            a_expr = self.a_expr()
            v = BinaryOp(operator.ne, v, a_expr)
    return v
def a_expr(self):
    """Parse left-associative additive chains (+, -)."""
    m_expr = self.m_expr()
    v = m_expr
    while self._peek(self.a_expr_rsts) in self.a_expr_chks:
        _token_ = self._peek(self.a_expr_chks)
        if _token_ == 'ADD':
            ADD = self._scan('ADD')
            m_expr = self.m_expr()
            v = BinaryOp(operator.add, v, m_expr)
        else: # == 'SUB'
            SUB = self._scan('SUB')
            m_expr = self.m_expr()
            v = BinaryOp(operator.sub, v, m_expr)
    return v
def m_expr(self):
    """Parse left-associative multiplicative chains (*, /, %)."""
    u_expr = self.u_expr()
    v = u_expr
    while self._peek(self.m_expr_rsts) in self.m_expr_chks:
        _token_ = self._peek(self.m_expr_chks)
        if _token_ == 'MUL':
            MUL = self._scan('MUL')
            u_expr = self.u_expr()
            v = BinaryOp(operator.mul, v, u_expr)
        elif _token_ == 'DIV':
            DIV = self._scan('DIV')
            u_expr = self.u_expr()
            v = BinaryOp(operator.truediv, v, u_expr)
        else: # == 'MOD'
            MOD = self._scan('MOD')
            u_expr = self.u_expr()
            v = BinaryOp(operator.mod, v, u_expr)
    return v
def u_expr(self):
    """Parse unary sign prefixes (-, +); otherwise fall through to atom."""
    _token_ = self._peek(self.u_expr_rsts)
    if _token_ == 'SIGN':
        SIGN = self._scan('SIGN')
        u_expr = self.u_expr()
        return UnaryOp(operator.neg, u_expr)
    elif _token_ == 'ADD':
        ADD = self._scan('ADD')
        u_expr = self.u_expr()
        return UnaryOp(operator.pos, u_expr)
    else: # in self.u_expr_chks
        atom = self.atom()
        return atom
    def atom(self):
        """Parse an atomic expression.

        Handles: parenthesized expression/list/map, ``url(...)``,
        ``alpha(...)``, literal functions (e.g. ``expression(...)``),
        ordinary function calls, ``!important``, barewords, numbers
        (with optional units), quoted strings, hex colors and variables.

        NOTE(review): generated parser code (yapps-style) — the exact
        order of _peek/_scan calls encodes the grammar; do not reorder.
        """
        _token_ = self._peek(self.u_expr_chks)
        if _token_ == 'LPAR':
            LPAR = self._scan('LPAR')
            _token_ = self._peek(self.atom_rsts)
            if _token_ == 'RPAR':
                # "()" is an empty, space-delimited list.
                v = ListLiteral([], comma=False)
            else: # in self.argspec_item_chks
                expr_map_or_list = self.expr_map_or_list()
                v = expr_map_or_list
            RPAR = self._scan('RPAR')
            return Parentheses(v)
        elif _token_ == 'URL_FUNCTION':
            # url(...) — bare URLs get special lexing, see interpolated_url().
            URL_FUNCTION = self._scan('URL_FUNCTION')
            LPAR = self._scan('LPAR')
            interpolated_url = self.interpolated_url()
            RPAR = self._scan('RPAR')
            return interpolated_url
        elif _token_ == 'ALPHA_FUNCTION':
            # alpha(...) — either the IE alpha(opacity=N) filter syntax,
            # or a regular call to the alpha() function.
            ALPHA_FUNCTION = self._scan('ALPHA_FUNCTION')
            LPAR = self._scan('LPAR')
            _token_ = self._peek(self.atom_rsts_)
            if _token_ == '"opacity"':
                self._scan('"opacity"')
                self._scan('"="')
                atom = self.atom()
                RPAR = self._scan('RPAR')
                return AlphaFunctionLiteral(atom)
            else: # in self.atom_chks
                argspec = self.argspec()
                RPAR = self._scan('RPAR')
                return CallOp("alpha", argspec)
        elif _token_ == 'LITERAL_FUNCTION':
            # Functions whose arguments are kept as literal text
            # (aside from interpolation), e.g. IE's expression().
            LITERAL_FUNCTION = self._scan('LITERAL_FUNCTION')
            LPAR = self._scan('LPAR')
            interpolated_function = self.interpolated_function()
            RPAR = self._scan('RPAR')
            return Interpolation.maybe(interpolated_function, type=Function, function_name=LITERAL_FUNCTION)
        elif _token_ == 'FNCT':
            # Ordinary function call with a parsed argument spec.
            FNCT = self._scan('FNCT')
            LPAR = self._scan('LPAR')
            argspec = self.argspec()
            RPAR = self._scan('RPAR')
            return CallOp(FNCT, argspec)
        elif _token_ == 'BANG_IMPORTANT':
            BANG_IMPORTANT = self._scan('BANG_IMPORTANT')
            return Literal(String.unquoted("!important", literal=True))
        elif _token_ in self.atom_chks_:
            # Bareword, possibly containing #{} interpolation.
            interpolated_bareword = self.interpolated_bareword()
            return Interpolation.maybe(interpolated_bareword)
        elif _token_ == 'NUM':
            NUM = self._scan('NUM')
            UNITS = None
            # Units (px, em, %) are optional and attach to the number.
            if self._peek(self.atom_rsts__) == 'UNITS':
                UNITS = self._scan('UNITS')
            return Literal(Number(float(NUM), unit=UNITS))
        elif _token_ not in self.atom_chks__:
            # Quoted string (single or double).
            interpolated_string = self.interpolated_string()
            return interpolated_string
        elif _token_ == 'COLOR':
            COLOR = self._scan('COLOR')
            return Literal(Color.from_hex(COLOR, literal=True))
        else: # == 'VAR'
            VAR = self._scan('VAR')
            return Variable(VAR)
def interpolation(self):
INTERP_START = self._scan('INTERP_START')
expr_lst = self.expr_lst()
INTERP_END = self._scan('INTERP_END')
return expr_lst
    def interpolated_url(self):
        """Parse the argument of ``url(...)``.

        Either a bare (unquoted) URL, possibly containing interpolation,
        or an ordinary expression list (e.g. a quoted string).
        """
        _token_ = self._peek(self.interpolated_url_rsts)
        if _token_ in self.interpolated_url_chks:
            # Bare URL — recognized via lexer hack tokens.
            interpolated_bare_url = self.interpolated_bare_url()
            return Interpolation.maybe(interpolated_bare_url, type=Url, quotes=None)
        else: # in self.argspec_item_chks
            expr_lst = self.expr_lst()
            return FunctionLiteral(expr_lst, "url")
    def interpolated_bare_url(self):
        """Parse an unquoted ``url()`` body into an alternating list of
        literal string parts and interpolation nodes.

        The returned list always starts and ends with a literal part
        (possibly ``''``) so literals and interpolations alternate.
        """
        _token_ = self._peek(self.interpolated_url_chks)
        if _token_ == 'BAREURL_HEAD_HACK':
            BAREURL_HEAD_HACK = self._scan('BAREURL_HEAD_HACK')
            parts = [BAREURL_HEAD_HACK]
        else: # == 'INTERP_START_URL_HACK'
            # URL begins directly with #{...}; pad with an empty literal
            # to keep the alternation invariant.
            INTERP_START_URL_HACK = self._scan('INTERP_START_URL_HACK')
            parts = ['']
        while self._peek(self.interpolated_bare_url_rsts) == 'INTERP_START':
            interpolation = self.interpolation()
            parts.append(interpolation)
            _token_ = self._peek(self.interpolated_bare_url_rsts_)
            if _token_ == 'BAREURL':
                BAREURL = self._scan('BAREURL')
                parts.append(BAREURL)
            elif _token_ == 'SPACE':
                # Whitespace terminates a bare URL immediately.
                SPACE = self._scan('SPACE')
                return parts
            else: # in self.interpolated_bare_url_rsts
                parts.append('')
        return parts
def interpolated_string(self):
_token_ = self._peek(self.interpolated_string_rsts)
if _token_ == 'SINGLE_QUOTE':
interpolated_string_single = self.interpolated_string_single()
return Interpolation.maybe(interpolated_string_single, quotes="'")
else: # == 'DOUBLE_QUOTE'
interpolated_string_double = self.interpolated_string_double()
return Interpolation.maybe(interpolated_string_double, quotes='"')
def interpolated_string_single(self):
SINGLE_QUOTE = self._scan('SINGLE_QUOTE')
SINGLE_STRING_GUTS = self._scan('SINGLE_STRING_GUTS')
parts = [unescape(SINGLE_STRING_GUTS)]
while self._peek(self.interpolated_string_single_rsts) == 'INTERP_START':
interpolation = self.interpolation()
parts.append(interpolation)
SINGLE_STRING_GUTS = self._scan('SINGLE_STRING_GUTS')
parts.append(unescape(SINGLE_STRING_GUTS))
SINGLE_QUOTE = self._scan('SINGLE_QUOTE')
return parts
def interpolated_string_double(self):
DOUBLE_QUOTE = self._scan('DOUBLE_QUOTE')
DOUBLE_STRING_GUTS = self._scan('DOUBLE_STRING_GUTS')
parts = [unescape(DOUBLE_STRING_GUTS)]
while self._peek(self.interpolated_string_double_rsts) == 'INTERP_START':
interpolation = self.interpolation()
parts.append(interpolation)
DOUBLE_STRING_GUTS = self._scan('DOUBLE_STRING_GUTS')
parts.append(unescape(DOUBLE_STRING_GUTS))
DOUBLE_QUOTE = self._scan('DOUBLE_QUOTE')
return parts
    def interpolated_bareword(self):
        """Parse a bareword that may contain ``#{}`` interpolation.

        Returns an alternating list of literal strings and interpolation
        nodes, beginning and ending with a literal (possibly ``''``).
        A SPACE token ends the bareword immediately.

        NOTE(review): generated parser code — the duplicated
        BAREWORD/SPACE tails mirror the grammar; do not refactor by hand.
        """
        _token_ = self._peek(self.atom_chks_)
        if _token_ == 'BAREWORD':
            BAREWORD = self._scan('BAREWORD')
            parts = [BAREWORD]
            if self._peek(self.interpolated_bareword_rsts) == 'SPACE':
                SPACE = self._scan('SPACE')
                return parts
        else: # == 'INTERP_START'
            # Word starts with interpolation; pad with '' to keep the
            # literal/interpolation alternation.
            interpolation = self.interpolation()
            parts = ['', interpolation]
            _token_ = self._peek(self.interpolated_bareword_rsts_)
            if _token_ == 'BAREWORD':
                BAREWORD = self._scan('BAREWORD')
                parts.append(BAREWORD)
            elif _token_ == 'SPACE':
                SPACE = self._scan('SPACE')
                return parts
            elif 1:
                parts.append('')
        while self._peek(self.interpolated_bareword_rsts__) == 'INTERP_START':
            interpolation = self.interpolation()
            parts.append(interpolation)
            _token_ = self._peek(self.interpolated_bareword_rsts_)
            if _token_ == 'BAREWORD':
                BAREWORD = self._scan('BAREWORD')
                parts.append(BAREWORD)
            elif _token_ == 'SPACE':
                SPACE = self._scan('SPACE')
                return parts
            elif 1:
                parts.append('')
        return parts
def interpolated_function(self):
interpolated_function_parens = self.interpolated_function_parens()
parts = interpolated_function_parens
while self._peek(self.interpolated_bare_url_rsts) == 'INTERP_START':
interpolation = self.interpolation()
parts.append(interpolation)
interpolated_function_parens = self.interpolated_function_parens()
parts.extend(interpolated_function_parens)
return parts
    def interpolated_function_parens(self):
        """Parse literal function text with balanced nested parentheses.

        Returns alternating literal strings and interpolation nodes; the
        parentheses themselves are merged back into the adjacent literal
        parts so only interpolations remain as non-string elements.
        """
        INTERP_NO_PARENS = self._scan('INTERP_NO_PARENS')
        parts = [INTERP_NO_PARENS]
        while self._peek(self.interpolated_function_parens_rsts) == 'LPAR':
            LPAR = self._scan('LPAR')
            interpolated_function = self.interpolated_function()
            # Splice: glue "(" and the nested result's leading literal onto
            # our trailing literal, keeping the alternation invariant.
            parts = parts[:-1] + [parts[-1] + LPAR + interpolated_function[0]] + interpolated_function[1:]
            RPAR = self._scan('RPAR')
            INTERP_NO_PARENS = self._scan('INTERP_NO_PARENS')
            parts[-1] += RPAR + INTERP_NO_PARENS
        return parts
def goal_interpolated_literal(self):
INTERP_ANYTHING = self._scan('INTERP_ANYTHING')
parts = [INTERP_ANYTHING]
while self._peek(self.goal_interpolated_literal_rsts) == 'INTERP_START':
interpolation = self.interpolation()
parts.append(interpolation)
INTERP_ANYTHING = self._scan('INTERP_ANYTHING')
parts.append(INTERP_ANYTHING)
END = self._scan('END')
return Interpolation.maybe(parts)
    def goal_interpolated_literal_with_vars(self):
        """Goal rule: literal text where both ``#{}`` interpolation and
        bare ``$variables`` are substituted, consumed through to END."""
        INTERP_NO_VARS = self._scan('INTERP_NO_VARS')
        parts = [INTERP_NO_VARS]
        while self._peek(self.goal_interpolated_literal_with_vars_rsts) != 'END':
            _token_ = self._peek(self.goal_interpolated_literal_with_vars_rsts_)
            if _token_ == 'INTERP_START':
                interpolation = self.interpolation()
                parts.append(interpolation)
            else: # == 'VAR'
                VAR = self._scan('VAR')
                parts.append(Variable(VAR))
            INTERP_NO_VARS = self._scan('INTERP_NO_VARS')
            parts.append(INTERP_NO_VARS)
        END = self._scan('END')
        return Interpolation.maybe(parts)
atom_chks_ = frozenset(['BAREWORD', 'INTERP_START'])
expr_map_or_list_rsts__ = frozenset(['LPAR', 'DOUBLE_QUOTE', 'VAR', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'INTERP_START', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'RPAR', 'FNCT', 'NOT', 'BANG_IMPORTANT', 'SINGLE_QUOTE', '","'])
u_expr_chks = frozenset(['LPAR', 'DOUBLE_QUOTE', 'BAREWORD', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'ALPHA_FUNCTION', 'VAR', 'NUM', 'FNCT', 'LITERAL_FUNCTION', 'BANG_IMPORTANT', 'SINGLE_QUOTE'])
m_expr_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'SUB', 'ALPHA_FUNCTION', 'RPAR', 'MUL', 'INTERP_END', 'BANG_IMPORTANT', 'DIV', 'LE', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NE', 'LT', 'NUM', '":"', 'LITERAL_FUNCTION', 'GT', 'END', 'SIGN', 'BAREWORD', 'GE', 'FNCT', 'VAR', 'EQ', 'AND', 'ADD', 'SINGLE_QUOTE', 'NOT', 'MOD', 'OR', '","'])
interpolated_bare_url_rsts_ = frozenset(['RPAR', 'INTERP_START', 'BAREURL', 'SPACE'])
argspec_items_rsts = frozenset(['RPAR', 'END', '","'])
expr_slst_chks = frozenset(['INTERP_END', 'RPAR', 'END', '":"', '","'])
expr_lst_rsts = frozenset(['INTERP_END', 'RPAR', 'END', '","'])
goal_interpolated_literal_rsts = frozenset(['END', 'INTERP_START'])
expr_map_or_list_rsts = frozenset(['RPAR', '":"', '","'])
goal_interpolated_literal_with_vars_rsts = frozenset(['VAR', 'END', 'INTERP_START'])
argspec_item_chks = frozenset(['LPAR', 'DOUBLE_QUOTE', 'VAR', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'INTERP_START', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'FNCT', 'NOT', 'BANG_IMPORTANT', 'SINGLE_QUOTE'])
a_expr_chks = frozenset(['ADD', 'SUB'])
interpolated_function_parens_rsts = frozenset(['LPAR', 'RPAR', 'INTERP_START'])
expr_slst_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'VAR', 'END', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'INTERP_START', 'FNCT', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'RPAR', '":"', 'NOT', 'INTERP_END', 'BANG_IMPORTANT', 'SINGLE_QUOTE', '","'])
interpolated_bareword_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'SUB', 'ALPHA_FUNCTION', 'RPAR', 'MUL', 'INTERP_END', 'BANG_IMPORTANT', 'DIV', 'LE', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NE', 'LT', 'NUM', '":"', 'LITERAL_FUNCTION', 'GT', 'END', 'SPACE', 'SIGN', 'BAREWORD', 'GE', 'FNCT', 'VAR', 'EQ', 'AND', 'ADD', 'SINGLE_QUOTE', 'NOT', 'MOD', 'OR', '","'])
atom_rsts__ = frozenset(['LPAR', 'DOUBLE_QUOTE', 'SUB', 'ALPHA_FUNCTION', 'RPAR', 'VAR', 'MUL', 'INTERP_END', 'BANG_IMPORTANT', 'DIV', 'LE', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NE', 'LT', 'NUM', '":"', 'LITERAL_FUNCTION', 'GT', 'END', 'SIGN', 'BAREWORD', 'GE', 'FNCT', 'UNITS', 'EQ', 'AND', 'ADD', 'SINGLE_QUOTE', 'NOT', 'MOD', 'OR', '","'])
or_expr_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'ALPHA_FUNCTION', 'RPAR', 'INTERP_END', 'BANG_IMPORTANT', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NUM', '":"', 'BAREWORD', 'END', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'FNCT', 'VAR', 'OR', 'NOT', 'SINGLE_QUOTE', '","'])
argspec_chks_ = frozenset(['END', 'RPAR'])
interpolated_string_single_rsts = frozenset(['SINGLE_QUOTE', 'INTERP_START'])
interpolated_bareword_rsts_ = frozenset(['LPAR', 'DOUBLE_QUOTE', 'SUB', 'ALPHA_FUNCTION', 'RPAR', 'MUL', 'DIV', 'BANG_IMPORTANT', 'INTERP_END', 'LE', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NE', 'LT', 'NUM', '":"', 'BAREWORD', 'GT', 'END', 'SPACE', 'SIGN', 'LITERAL_FUNCTION', 'GE', 'FNCT', 'VAR', 'EQ', 'AND', 'ADD', 'SINGLE_QUOTE', 'NOT', 'MOD', 'OR', '","'])
and_expr_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'ALPHA_FUNCTION', 'RPAR', 'INTERP_END', 'BANG_IMPORTANT', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NUM', '":"', 'BAREWORD', 'END', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'FNCT', 'VAR', 'AND', 'OR', 'NOT', 'SINGLE_QUOTE', '","'])
comparison_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'ALPHA_FUNCTION', 'RPAR', 'INTERP_END', 'BANG_IMPORTANT', 'LE', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NE', 'LT', 'NUM', '":"', 'LITERAL_FUNCTION', 'GT', 'END', 'SIGN', 'BAREWORD', 'ADD', 'FNCT', 'VAR', 'EQ', 'AND', 'GE', 'SINGLE_QUOTE', 'NOT', 'OR', '","'])
argspec_chks = frozenset(['DOTDOTDOT', 'SLURPYVAR'])
atom_rsts_ = frozenset(['KWVAR', 'LPAR', 'DOUBLE_QUOTE', 'SLURPYVAR', 'ALPHA_FUNCTION', 'RPAR', 'BANG_IMPORTANT', '"opacity"', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NUM', 'BAREWORD', 'END', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'FNCT', 'VAR', 'DOTDOTDOT', 'NOT', 'SINGLE_QUOTE'])
interpolated_string_double_rsts = frozenset(['DOUBLE_QUOTE', 'INTERP_START'])
atom_chks__ = frozenset(['COLOR', 'VAR'])
expr_map_or_list_rsts_ = frozenset(['RPAR', '","'])
u_expr_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'BAREWORD', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'ALPHA_FUNCTION', 'SIGN', 'VAR', 'ADD', 'NUM', 'FNCT', 'LITERAL_FUNCTION', 'BANG_IMPORTANT', 'SINGLE_QUOTE'])
interpolated_url_chks = frozenset(['INTERP_START_URL_HACK', 'BAREURL_HEAD_HACK'])
atom_chks = frozenset(['KWVAR', 'LPAR', 'DOUBLE_QUOTE', 'BANG_IMPORTANT', 'END', 'SLURPYVAR', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'DOTDOTDOT', 'INTERP_START', 'RPAR', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'VAR', 'FNCT', 'NOT', 'SIGN', 'SINGLE_QUOTE'])
interpolated_url_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'VAR', 'SINGLE_QUOTE', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'INTERP_START', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'FNCT', 'NOT', 'INTERP_START_URL_HACK', 'BANG_IMPORTANT', 'BAREURL_HEAD_HACK'])
comparison_chks = frozenset(['GT', 'GE', 'NE', 'LT', 'LE', 'EQ'])
argspec_items_rsts_ = frozenset(['KWVAR', 'LPAR', 'DOUBLE_QUOTE', 'VAR', 'END', 'SLURPYVAR', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'DOTDOTDOT', 'INTERP_START', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'RPAR', 'FNCT', 'NOT', 'BANG_IMPORTANT', 'SINGLE_QUOTE'])
a_expr_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'SUB', 'ALPHA_FUNCTION', 'RPAR', 'INTERP_END', 'BANG_IMPORTANT', 'LE', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NE', 'LT', 'NUM', '":"', 'LITERAL_FUNCTION', 'GT', 'END', 'SIGN', 'BAREWORD', 'GE', 'FNCT', 'VAR', 'EQ', 'AND', 'ADD', 'SINGLE_QUOTE', 'NOT', 'OR', '","'])
interpolated_string_rsts = frozenset(['DOUBLE_QUOTE', 'SINGLE_QUOTE'])
interpolated_bareword_rsts__ = frozenset(['LPAR', 'DOUBLE_QUOTE', 'SUB', 'ALPHA_FUNCTION', 'RPAR', 'MUL', 'INTERP_END', 'BANG_IMPORTANT', 'DIV', 'LE', 'URL_FUNCTION', 'INTERP_START', 'COLOR', 'NE', 'LT', 'NUM', '":"', 'LITERAL_FUNCTION', 'GT', 'END', 'SIGN', 'BAREWORD', 'GE', 'FNCT', 'VAR', 'EQ', 'AND', 'ADD', 'SINGLE_QUOTE', 'NOT', 'MOD', 'OR', '","'])
m_expr_chks = frozenset(['MUL', 'DIV', 'MOD'])
goal_interpolated_literal_with_vars_rsts_ = frozenset(['VAR', 'INTERP_START'])
interpolated_bare_url_rsts = frozenset(['RPAR', 'INTERP_START'])
argspec_items_chks = frozenset(['KWVAR', 'LPAR', 'DOUBLE_QUOTE', 'VAR', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'INTERP_START', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'FNCT', 'NOT', 'BANG_IMPORTANT', 'SINGLE_QUOTE'])
argspec_rsts = frozenset(['KWVAR', 'LPAR', 'DOUBLE_QUOTE', 'BANG_IMPORTANT', 'END', 'SLURPYVAR', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'DOTDOTDOT', 'INTERP_START', 'RPAR', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'VAR', 'FNCT', 'NOT', 'SIGN', 'SINGLE_QUOTE'])
atom_rsts = frozenset(['LPAR', 'DOUBLE_QUOTE', 'BANG_IMPORTANT', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'INTERP_START', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'VAR', 'FNCT', 'NOT', 'RPAR', 'SINGLE_QUOTE'])
argspec_items_rsts__ = frozenset(['KWVAR', 'LPAR', 'DOUBLE_QUOTE', 'VAR', 'SLURPYVAR', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'DOTDOTDOT', 'INTERP_START', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'FNCT', 'NOT', 'BANG_IMPORTANT', 'SINGLE_QUOTE'])
argspec_rsts_ = frozenset(['KWVAR', 'LPAR', 'DOUBLE_QUOTE', 'BANG_IMPORTANT', 'END', 'URL_FUNCTION', 'BAREWORD', 'COLOR', 'ALPHA_FUNCTION', 'INTERP_START', 'SIGN', 'LITERAL_FUNCTION', 'ADD', 'NUM', 'VAR', 'FNCT', 'NOT', 'RPAR', 'SINGLE_QUOTE'])
|
aspidites/django | refs/heads/master | tests/field_deconstruction/tests.py | 69 | from __future__ import unicode_literals
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.utils import six
class FieldDeconstructionTests(SimpleTestCase):
    """
    Tests the deconstruct() method on all core fields.

    Each test asserts that a field deconstructs to the expected
    (name, path, args, kwargs) 4-tuple — the form that migration
    serialization relies on.
    """

    def test_name(self):
        """
        Tests the outputting of the correct name if assigned one.
        """
        # First try using a "normal" field
        field = models.CharField(max_length=65)
        name, path, args, kwargs = field.deconstruct()
        self.assertIsNone(name)
        field.set_attributes_from_name("is_awesome_test")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(name, "is_awesome_test")
        self.assertIsInstance(name, six.text_type)
        # Now try with a ForeignKey
        field = models.ForeignKey("some_fake.ModelName", models.CASCADE)
        name, path, args, kwargs = field.deconstruct()
        self.assertIsNone(name)
        field.set_attributes_from_name("author")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(name, "author")

    def test_auto_field(self):
        field = models.AutoField(primary_key=True)
        field.set_attributes_from_name("id")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.AutoField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"primary_key": True})

    def test_big_integer_field(self):
        field = models.BigIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BigIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_boolean_field(self):
        field = models.BooleanField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BooleanField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.BooleanField(default=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BooleanField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"default": True})

    def test_char_field(self):
        field = models.CharField(max_length=65)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CharField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 65})
        field = models.CharField(max_length=65, null=True, blank=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CharField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True})

    def test_char_field_choices(self):
        field = models.CharField(max_length=1, choices=(("A", "One"), ("B", "Two")))
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CharField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"choices": [("A", "One"), ("B", "Two")], "max_length": 1})

    def test_csi_field(self):
        field = models.CommaSeparatedIntegerField(max_length=100)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 100})

    def test_date_field(self):
        field = models.DateField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.DateField(auto_now=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"auto_now": True})

    def test_datetime_field(self):
        field = models.DateTimeField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateTimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.DateTimeField(auto_now_add=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateTimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"auto_now_add": True})
        # Bug #21785
        field = models.DateTimeField(auto_now=True, auto_now_add=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateTimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True})

    def test_decimal_field(self):
        field = models.DecimalField(max_digits=5, decimal_places=2)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DecimalField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})

    def test_decimal_field_0_decimal_places(self):
        """
        A DecimalField with decimal_places=0 should work (#22272).
        """
        field = models.DecimalField(max_digits=5, decimal_places=0)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DecimalField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0})

    def test_email_field(self):
        field = models.EmailField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.EmailField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 254})
        field = models.EmailField(max_length=255)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.EmailField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 255})

    def test_file_field(self):
        field = models.FileField(upload_to="foo/bar")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FileField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"upload_to": "foo/bar"})
        # Test max_length
        field = models.FileField(upload_to="foo/bar", max_length=200)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FileField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200})

    def test_file_path_field(self):
        # Raw strings: "\." is an invalid escape sequence in a plain
        # string literal (deprecated, later a syntax error).
        field = models.FilePathField(match=r".*\.txt$")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FilePathField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"match": r".*\.txt$"})
        field = models.FilePathField(recursive=True, allow_folders=True, max_length=123)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FilePathField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123})

    def test_float_field(self):
        field = models.FloatField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FloatField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_foreign_key(self):
        # Test basic pointing
        from django.contrib.auth.models import Permission
        field = models.ForeignKey("auth.Permission", models.CASCADE)
        field.remote_field.model = Permission
        field.remote_field.field_name = "id"
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
        self.assertFalse(hasattr(kwargs['to'], "setting_name"))
        # Test swap detection for swappable model
        field = models.ForeignKey("auth.User", models.CASCADE)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.CASCADE})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
        # Test nonexistent (for now) model
        field = models.ForeignKey("something.Else", models.CASCADE)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "something.Else", "on_delete": models.CASCADE})
        # Test on_delete
        field = models.ForeignKey("auth.User", models.SET_NULL)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
        # Test to_field preservation
        field = models.ForeignKey("auth.Permission", models.CASCADE, to_field="foobar")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar", "on_delete": models.CASCADE})
        # Test related_name preservation
        field = models.ForeignKey("auth.Permission", models.CASCADE, related_name="foobar")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "foobar", "on_delete": models.CASCADE})

    @override_settings(AUTH_USER_MODEL="auth.Permission")
    def test_foreign_key_swapped(self):
        # It doesn't matter that we swapped out user for permission;
        # there's no validation. We just want to check the setting stuff works.
        field = models.ForeignKey("auth.Permission", models.CASCADE)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")

    def test_image_field(self):
        field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ImageField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})

    def test_integer_field(self):
        field = models.IntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.IntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_ip_address_field(self):
        field = models.IPAddressField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.IPAddressField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_generic_ip_address_field(self):
        field = models.GenericIPAddressField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.GenericIPAddressField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.GenericIPAddressField(protocol="IPv6")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.GenericIPAddressField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"protocol": "IPv6"})

    def test_many_to_many_field(self):
        # Test normal
        field = models.ManyToManyField("auth.Permission")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission"})
        self.assertFalse(hasattr(kwargs['to'], "setting_name"))
        # Test swappable
        field = models.ManyToManyField("auth.User")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.User"})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
        # Test through
        field = models.ManyToManyField("auth.Permission", through="auth.Group")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"})
        # Test custom db_table
        field = models.ManyToManyField("auth.Permission", db_table="custom_table")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "db_table": "custom_table"})
        # Test related_name
        field = models.ManyToManyField("auth.Permission", related_name="custom_table")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "custom_table"})

    @override_settings(AUTH_USER_MODEL="auth.Permission")
    def test_many_to_many_field_swapped(self):
        # It doesn't matter that we swapped out user for permission;
        # there's no validation. We just want to check the setting stuff works.
        field = models.ManyToManyField("auth.Permission")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission"})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")

    def test_null_boolean_field(self):
        field = models.NullBooleanField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.NullBooleanField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_positive_integer_field(self):
        field = models.PositiveIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.PositiveIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_positive_small_integer_field(self):
        field = models.PositiveSmallIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_slug_field(self):
        field = models.SlugField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.SlugField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.SlugField(db_index=False, max_length=231)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.SlugField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"db_index": False, "max_length": 231})

    def test_small_integer_field(self):
        field = models.SmallIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.SmallIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_text_field(self):
        field = models.TextField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.TextField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_time_field(self):
        field = models.TimeField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.TimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.TimeField(auto_now=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {'auto_now': True})
        field = models.TimeField(auto_now_add=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {'auto_now_add': True})

    def test_url_field(self):
        field = models.URLField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.URLField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.URLField(max_length=231)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.URLField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 231})

    def test_binary_field(self):
        field = models.BinaryField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BinaryField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
|
creasyw/IMTAphy | refs/heads/master | documentation/doctools/tags/0.3/sphinx/ext/autodoc.py | 2 | # -*- coding: utf-8 -*-
"""
sphinx.ext.autodoc
~~~~~~~~~~~~~~~~~~
Automatically insert docstrings for functions, classes or whole modules into
the doctree, thus avoiding duplication between docstrings and documentation
for those who like elaborate docstrings.
:copyright: 2008 by Georg Brandl.
:license: BSD.
"""
import re
import types
import inspect
import textwrap
import linecache
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx.util import rpartition
# Python 2.4 compatibility: BaseException was only added in Python 2.5.
try:
    base_exception = BaseException
except NameError:
    base_exception = Exception

# PEP 263 source-encoding declaration, e.g. "# -*- coding: utf-8 -*-".
_charset_re = re.compile(r'coding[:=]\s*([-\w.]+)')
# Cache of module name -> detected source charset; see get_module_charset().
_module_charsets = {}
def isdescriptor(x):
    """Return True if *x* looks like a descriptor, i.e. has a callable
    ``__get__``, ``__set__`` or ``__delete__`` attribute."""
    return any(callable(getattr(x, attr, None))
               for attr in ('__get__', '__set__', '__delete__'))
def prepare_docstring(s):
    """
    Convert a docstring into lines of parseable reST.  Return it as a list of
    lines usable for inserting into a docutils ViewList (used as argument
    of nested_parse().)  An empty line is added to act as a separator between
    this docstring and following content.
    """
    if not s or s.isspace():
        return ['']
    s = s.expandtabs()
    first_nl = s.rstrip().find('\n')
    if first_nl == -1:
        # Single-line docstring.
        return [s.strip(), '']
    # The first line may be indented differently from the rest, so strip it
    # separately and dedent only the remainder.
    head = s[:first_nl].strip()
    body = textwrap.dedent(s[first_nl + 1:]).splitlines()
    return [head] + body + ['']
def get_module_charset(module):
    """Return the source charset of the given module (PEP 263 declaration).

    Imports *module*, inspects the first two lines of its source file for a
    coding declaration, and falls back to 'ascii'.  Results are memoized in
    the module-level ``_module_charsets`` cache.
    """
    if module in _module_charsets:
        return _module_charsets[module]
    filename = __import__(module, None, None, ['']).__file__
    # Map compiled files back to the corresponding .py source file.
    if filename[-4:] in ('.pyc', '.pyo'):
        filename = filename[:-1]
    # PEP 263 allows the declaration only on source line 1 or 2.
    for line in [linecache.getline(filename, x) for x in (1, 2)]:
        match = _charset_re.search(line)
        if match is not None:
            charset = match.group(1)
            break
    else:
        charset = 'ascii'
    _module_charsets[module] = charset
    return charset
def generate_rst(what, name, members, inherited, undoc, add_content, document,
                 lineno, indent='', filename_set=None, check_module=False):
    """Import the object called *name* and generate reST documenting it.

    *what* is the directive kind ('module', 'class', 'exception',
    'function', 'method' or 'attribute').  Returns ``(warnings, result)``
    where *warnings* is a list of docutils warning nodes and *result* is a
    ViewList of generated reST lines.  The filename of every imported
    module is added to *filename_set* (if given) so callers can record
    build dependencies.  With *check_module*, members whose ``__module__``
    differs from the target module are skipped.
    """
    env = document.settings.env
    # find out what to import
    if what == 'module':
        mod = obj = name
        objpath = []
    elif what in ('class', 'exception', 'function'):
        mod, obj = rpartition(name, '.')
        if not mod and hasattr(env, 'autodoc_current_module'):
            mod = env.autodoc_current_module
        if not mod:
            mod = env.currmodule
        objpath = [obj]
    else:
        # method/attribute: split off the class name as well
        mod_cls, obj = rpartition(name, '.')
        if not mod_cls and hasattr(env, 'autodoc_current_class'):
            mod_cls = env.autodoc_current_class
        if not mod_cls:
            mod_cls = env.currclass
        mod, cls = rpartition(mod_cls, '.')
        if not mod and hasattr(env, 'autodoc_current_module'):
            mod = env.autodoc_current_module
        if not mod:
            mod = env.currmodule
        objpath = [cls, obj]
    result = ViewList()
    docstrings = []
    if mod is None:
        warning = document.reporter.warning(
            'don\'t know which module to import for documenting %r '
            '(try placing a "module" or "currentmodule" directive in the document, '
            'or giving an explicit module name)' % name, line=lineno)
        return [warning], result
    try:
        todoc = module = __import__(mod, None, None, ['foo'])
        if filename_set is not None and hasattr(module, '__file__') and module.__file__:
            modfile = module.__file__
            if modfile.lower().endswith('.pyc') or modfile.lower().endswith('.pyo'):
                modfile = modfile[:-1]
            filename_set.add(modfile)
        for part in objpath:
            todoc = getattr(todoc, part)
        if check_module:
            # only checking __module__ for members not given explicitly
            if hasattr(todoc, '__module__'):
                if todoc.__module__ != mod:
                    return [], result
        if getattr(todoc, '__doc__', None):
            docstrings.append(todoc.__doc__)
    except (ImportError, AttributeError):
        warning = document.reporter.warning(
            'autodoc can\'t import/find %s %r, check your spelling '
            'and sys.path' % (what, str(name)), line=lineno)
        return [warning], result
    # add directive header; drop the implicit "self" from method signatures
    try:
        if what == 'class':
            args = inspect.formatargspec(*inspect.getargspec(todoc.__init__))
            if args[1:7] == 'self, ':
                args = '(' + args[7:]
            elif args == '(self)':
                args = '()'
        elif what in ('function', 'method'):
            args = inspect.formatargspec(*inspect.getargspec(todoc))
            if what == 'method':
                if args[1:7] == 'self, ':
                    args = '(' + args[7:]
                elif args == '(self)':
                    args = '()'
        else:
            args = ''
    except Exception:
        # C-implemented callables etc. have no retrievable signature.
        args = ''
    if len(objpath) == 2:
        qualname = '%s.%s' % (cls, obj)
    else:
        qualname = obj
    result.append(indent + '.. %s:: %s%s' % (what, qualname, args), '<autodoc>')
    result.append('', '<autodoc>')
    # the module directive doesn't want content
    if what != 'module':
        indent += '   '
    # add docstring content
    # BUG FIX: this used to read ``docstring.splitlines()`` where
    # ``docstring`` is undefined (NameError whenever automodule_skip_lines
    # was set), and indexed ``docstrings[0]`` without guarding against
    # modules that have no docstring at all.
    if what == 'module' and env.config.automodule_skip_lines and \
           docstrings and docstrings[0]:
        docstrings[0] = '\n'.join(docstrings[0].splitlines()
                                  [env.config.automodule_skip_lines:])
    # for classes, what the "docstring" is can be controlled via an option
    if what in ('class', 'exception'):
        content = env.config.autoclass_content
        if content in ('both', 'init'):
            initdocstring = getattr(todoc, '__init__', None).__doc__
            # for new-style classes, no __init__ means default __init__
            if initdocstring == object.__init__.__doc__:
                initdocstring = None
            if initdocstring:
                if content == 'init':
                    docstrings = [initdocstring]
                else:
                    docstrings.append('\n\n' + initdocstring)
        # the default is only the class docstring
    # get the encoding of the docstring and decode it (Python 2 bytes -> unicode)
    module = getattr(todoc, '__module__', None)
    if module is not None:
        charset = get_module_charset(module)
        docstrings = [docstring.decode(charset) for docstring in docstrings]
    for docstring in docstrings:
        docstring = prepare_docstring(docstring)
        for i, line in enumerate(docstring):
            result.append(indent + line, '<docstring of %s>' % name, i)
    # add source content, if present
    if add_content:
        for line, src in zip(add_content.data, add_content.items):
            result.append(indent + line, src[0], src[1])
    if not members or what in ('function', 'method', 'attribute'):
        return [], result
    # remember context for nested auto directives generated below
    env.autodoc_current_module = mod
    if objpath:
        env.autodoc_current_class = objpath[0]
    warnings = []
    # add members, if possible
    _all = members == ['__all__']
    members_check_module = False
    if _all:
        if what == 'module':
            # for implicit module members, check __module__ to avoid documenting
            # imported objects
            members_check_module = True
            all_members = inspect.getmembers(todoc)
        else:
            if inherited:
                # getmembers() uses dir() which pulls in members from all base classes
                all_members = inspect.getmembers(todoc)
            else:
                # __dict__ contains only the members directly defined in the class
                all_members = sorted(todoc.__dict__.iteritems())
    else:
        all_members = [(mname, getattr(todoc, mname)) for mname in members]
    for (membername, member) in all_members:
        # implicit members: skip private/special names
        if _all and membername.startswith('_'):
            continue
        doc = getattr(member, '__doc__', None)
        if not undoc and not doc:
            continue
        if what == 'module':
            if isinstance(member, types.FunctionType):
                memberwhat = 'function'
            elif isinstance(member, types.ClassType) or \
                 isinstance(member, type):
                if issubclass(member, base_exception):
                    memberwhat = 'exception'
                else:
                    memberwhat = 'class'
            else:
                # XXX: todo -- attribute docs
                continue
        else:
            if callable(member):
                memberwhat = 'method'
            elif isdescriptor(member):
                memberwhat = 'attribute'
            else:
                # XXX: todo -- attribute docs
                continue
        full_membername = name + '.' + membername
        subwarn, subres = generate_rst(memberwhat, full_membername, ['__all__'],
                                       inherited, undoc, None, document, lineno,
                                       indent, check_module=members_check_module)
        warnings.extend(subwarn)
        result.extend(subres)
    env.autodoc_current_module = None
    env.autodoc_current_class = None
    return warnings, result
def _auto_directive(dirname, arguments, options, content, lineno,
                    content_offset, block_text, state, state_machine):
    """Shared implementation behind every ``auto*`` directive.

    Strips the ``auto`` prefix to find what is being documented, runs
    :func:`generate_rst`, registers file dependencies, and parses the
    generated reST into docutils nodes.
    """
    what = dirname[4:]                  # 'automodule' -> 'module', etc.
    name = arguments[0]
    members = options.get('members', [])
    inherited = 'inherited-members' in options
    # :inherited-members: implies :members:
    if inherited and not members:
        members = ['__all__']
    undoc = 'undoc-members' in options
    filename_set = set()
    warnings, result = generate_rst(what, name, members, inherited, undoc, content,
                                    state.document, lineno, filename_set=filename_set)
    # record all filenames as dependencies -- this will at least partially make
    # automatic invalidation possible
    env = state.document.settings.env
    for fn in filename_set:
        env.note_dependency(fn)
    if dirname != 'automodule':
        node = nodes.paragraph()
        state.nested_parse(result, content_offset, node)
        return warnings + node.children
    # automodule content may contain titles, so parse into a section while
    # temporarily resetting the title-style bookkeeping.
    node = nodes.section()
    saved_styles = state.memo.title_styles
    saved_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    state.nested_parse(result, content_offset, node, match_titles=1)
    state.memo.title_styles = saved_styles
    state.memo.section_level = saved_level
    return warnings + node.children
def auto_directive(*args, **kwds):
    # Thin wrapper around _auto_directive for the member-less directives
    # (autofunction/automethod/autoattribute); kept as a distinct function
    # object so a separate option spec can be attached at registration time.
    return _auto_directive(*args, **kwds)
def auto_directive_withmembers(*args, **kwds):
    # Same implementation as auto_directive, but registered with the
    # :members:/:undoc-members: option specs (see setup()).
    return _auto_directive(*args, **kwds)
def members_directive(arg):
    """Convert the :members: option value into a member-name list.

    A bare ``:members:`` (no value) means "all members"; otherwise the value
    is a comma-separated list of names, each stripped of whitespace.
    """
    return ['__all__'] if arg is None else [piece.strip() for piece in arg.split(',')]
def setup(app):
    """Register the auto* directives and their config values with Sphinx."""
    # Option specs: module-level directives take :members:/:undoc-members:,
    # class-level ones additionally accept :inherited-members:.
    mod_options = {'members': members_directive, 'undoc-members': directives.flag}
    cls_options = {'members': members_directive, 'undoc-members': directives.flag,
                   'inherited-members': directives.flag}
    # Positional args after the function follow the old-style docutils
    # directive convention -- presumably (content_allowed, (required_args,
    # optional_args, final_arg_whitespace)); confirm against the Sphinx API.
    app.add_directive('automodule', auto_directive_withmembers,
                      1, (1, 0, 1), **mod_options)
    app.add_directive('autoclass', auto_directive_withmembers,
                      1, (1, 0, 1), **cls_options)
    app.add_directive('autoexception', auto_directive_withmembers,
                      1, (1, 0, 1), **cls_options)
    app.add_directive('autofunction', auto_directive, 1, (1, 0, 1))
    app.add_directive('automethod', auto_directive, 1, (1, 0, 1))
    app.add_directive('autoattribute', auto_directive, 1, (1, 0, 1))
    # Both values are consulted in generate_rst; the final True asks Sphinx
    # to rebuild documents when the value changes.
    app.add_config_value('automodule_skip_lines', 0, True)
    app.add_config_value('autoclass_content', 'class', True)
|
obulpathi/poppy | refs/heads/master | poppy/storage/mockdb/flavors.py | 3 | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from poppy.model import flavor
from poppy.storage import base
class FlavorsController(base.FlavorsController):
    """Mock flavors controller backed by the in-memory mock database."""

    @property
    def session(self):
        """The mock driver's backing store."""
        return self._driver.database

    @staticmethod
    def _standard_flavor():
        """Build the single canned "standard" flavor this mock serves."""
        return flavor.Flavor(
            "standard",
            [flavor.Provider("mock", "www.mock_provider.com")]
        )

    def list(self):
        """Return every known flavor (always just "standard")."""
        return [self._standard_flavor()]

    def get(self, flavor_id):
        """Return the canned flavor, or raise for the sentinel id."""
        canned = self._standard_flavor()
        if flavor_id == "non_exist":
            raise LookupError("More than one flavor/no record was retrieved.")
        return canned

    def add(self, flavor):
        """No-op: the mock store does not persist new flavors."""
        pass

    def delete(self, flavor_id):
        """No-op: the mock store does not delete flavors."""
        pass
|
plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/volume/colorbar/tickfont/__init__.py | 293 | import sys
# Eager imports on Python < 3.7; lazy, on-demand imports otherwise.
if sys.version_info < (3, 7):
    from ._size import SizeValidator
    from ._family import FamilyValidator
    from ._color import ColorValidator
else:
    # relative_import defers the submodule imports until attribute access by
    # wiring up module-level __getattr__/__dir__ (PEP 562, available from 3.7).
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._size.SizeValidator", "._family.FamilyValidator", "._color.ColorValidator"],
    )
|
mickp/microscope | refs/heads/master | microscope/testsuite/deviceserver_test.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## Copyright (C) 2017 David Pinto <david.pinto@bioch.ox.ac.uk>
##
## This file is part of Microscope.
##
## Microscope is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Microscope is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Microscope. If not, see <http://www.gnu.org/licenses/>.
import logging
import multiprocessing
import time
import unittest
import unittest.mock
import microscope.clients
import microscope.devices
import microscope.deviceserver
from microscope.devices import device
from microscope.testsuite.devices import TestCamera
from microscope.testsuite.devices import TestFilterWheel
def _serve_without_logs(*args, **kwargs):
    """Run serve_devices without noise from the logs.

    The device server redirects the logger to stderr *and* creates files in
    the current directory, with no option to turn either behaviour off, so
    both log-handler factories are patched to yield NullHandlers instead.
    """
    def null_logs(*args, **kwargs):
        return logging.NullHandler()
    patch = unittest.mock.patch
    # Patch the file-creating handler and the stderr handler in one go.
    with patch('microscope.deviceserver.RotatingFileHandler', null_logs), \
         patch('microscope.deviceserver.StreamHandler', null_logs):
        microscope.deviceserver.serve_devices(*args, **kwargs)
class BaseTestServeDevices(unittest.TestCase):
    """Handles start and termination of deviceserver.
    Subclasses may overload class properties defaults as needed.
    Attributes:
        DEVICES (list): list of :class:`microscope.devices` to initialise.
        TIMEOUT (number): time given for service to terminate after
            receiving signal to terminate.
        p (multiprocessing.Process): device server process.
    """
    DEVICES = []
    TIMEOUT = 5
    def setUp(self):
        # Serve the configured devices from a child process so the test can
        # observe liveness and terminate it independently of the test runner.
        self.p = multiprocessing.Process(target=_serve_without_logs,
                                         args=(self.DEVICES,))
        self.p.start()
    def tearDown(self):
        # SIGTERM the server and fail if it has not exited within TIMEOUT.
        self.p.terminate()
        self.p.join(self.TIMEOUT)
        self.assertFalse(self.p.is_alive(),
                         "deviceserver not dead after SIGTERM")
class TestStarting(BaseTestServeDevices):
    # Smoke tests: the server must come up when given real test devices.
    # Two devices on distinct ports, exercising constructor kwargs.
    DEVICES = [
        device(TestCamera, '127.0.0.1', 8001, {'buffer_length' : 0}),
        device(TestFilterWheel, '127.0.0.1', 8003,
               {'filters' : [(0, 'GFP', 525), (1, 'RFP'), (2, 'Cy5')]}),
    ]
    def test_standard(self):
        """Simplest case, start and exit, given enough time to start all devices"""
        time.sleep(2)
        self.assertTrue(self.p.is_alive(), "service dies at start")
    def test_immediate_interrupt(self):
        """Check issues on SIGTERM before starting all devices"""
        # Intentionally empty: tearDown() terminates the server straight
        # away, which is the whole scenario under test.
        pass
class TestInputCheck(BaseTestServeDevices):
    # Degenerate input: DEVICES stays at the base class's empty default.
    def test_empty_devices(self):
        """Check behaviour if there are no devices."""
        # With nothing to serve, the process should exit on its own.
        time.sleep(2)
        self.assertTrue(not self.p.is_alive(),
                        "not dying for empty list of devices")
class DeviceWithPort(microscope.devices.Device):
    """Minimal concrete Device whose constructor takes a ``port`` argument.

    Used to check that a device-constructor kwarg named ``port`` does not
    clash with the device server's own serving port (see
    TestClashingArguments below).
    """
    def __init__(self, port, **kwargs):
        super().__init__(**kwargs)
        self._port = port
    @property
    def port(self):
        # The constructor-supplied value, exposed so a client can read it back.
        return self._port
    def _on_shutdown(self):
        # Nothing to clean up in this stub.
        pass
    def initialize(self):
        # No hardware to initialise.
        pass
class TestClashingArguments(BaseTestServeDevices):
    """Device server and device constructor arguments do not clash"""
    # The server listens on 8000 while the device's own 'port' kwarg is 7000.
    DEVICES = [
        device(DeviceWithPort, '127.0.0.1', 8000, {'port' : 7000}),
    ]
    def test_port_conflict(self):
        # Give the server time to come up, then read the property remotely:
        # it must reflect the constructor kwarg, not the serving port.
        time.sleep(2)
        client = microscope.clients.Client('PYRO:DeviceWithPort@127.0.0.1:8000')
        self.assertEqual(client.port, 7000)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
bringingheavendown/numpy | refs/heads/master | numpy/testing/__init__.py | 11 | """Common test support for all numpy test scripts.
This single module should provide all the common functionality for numpy tests
in a single location, so that test scripts can just import it and work right
away.
"""
from __future__ import division, absolute_import, print_function
from unittest import TestCase
from . import decorators as dec
from .nosetester import run_module_suite, NoseTester as Tester, _numpy_tester
from .utils import *
# Module-level ``test`` entry point bound to a fresh NoseTester instance.
test = _numpy_tester().test
|
flyfax/huawei_metis | refs/heads/master | app/tcm/tcm_views.py | 1 | # -*- coding: utf-8 -*-
import logging
import time
from flask import request, jsonify
from app import multiauth, errHandler, models, mysqlhandler, aws_s3Handler
from . import tcm_rank
from .. import mysql
# Module-level singletons shared by every route in this blueprint.
redis_logger = logging.getLogger('redis')
runMySQL = mysqlhandler.MySQLHandler(mysql)            # raw SQL helper
tcmPointHandler = mysqlhandler.TCMPointHandler(mysql)  # mission/points helper
@tcm_rank.route('/api/generate_mission_table', methods=['POST'])
def generate_misssion_table():
    """(Re)build the mission_date table from a start date and period length.

    NOTE(review): unlike every other route in this module, this endpoint has
    no @multiauth.login_required guard -- confirm that is intentional.
    NOTE(review): the function name has a typo ("misssion"); left unchanged
    because Flask registers the view endpoint under this name.
    """
    mission_start_date = request.json.get('mission_start_date')
    mission_period_days = request.json.get('mission_period_days')
    result = tcmPointHandler.generate_mission_date_table(mission_period_days, mission_start_date)
    return jsonify({"errcode": 200, "respmsg": result})
@tcm_rank.route('/api/check_mission_table', methods=['POST'])
def check_mission_table():
    """Return every mission window (number, start date, end date) as JSON."""
    rows = runMySQL.sqlqueryall('''SELECT mission_number, mission_start_date, mission_end_date FROM mission_date''')
    missions = [
        {"mission_number": number,
         "mission_start_date": start,
         "mission_end_date": end}
        for number, start, end in rows
    ]
    return jsonify({"errcode": 200, "mission_date": missions})
@tcm_rank.route('/api/mission_rank_by_point', methods=['POST'])
@multiauth.login_required
def mission_ecg_rank():
    """Top-3 user ranking by points for the current mission."""
    user_account = request.json.get('userid')
    mission_number = mysqlhandler.Task_idHandler(mysql).get_task_id()
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        # Auth failure: propagate the error response from the model layer.
        return status
    else:
        result = {}
        # mission_number comes from the internal Task_idHandler, not user input.
        rv = runMySQL.sqlqueryall('''SELECT user_id, task_based_points, task_multiplier, bonus, extra_bonus FROM points_bonus_view WHERE task_id=%s ORDER BY task_based_points DESC limit 3''' % mission_number)
        try:
            for item in rv:
                # total = (base + bonus) * multiplier + extra_bonus
                total_points = (item[1] + item[3]) * item[2] + item[4]
                result[item[0]] = int(total_points)
        except IndexError:
            return jsonify({"errcode": 704, "respmsg": errHandler.get_errmsg(704)})
        # NOTE: dict.iteritems() is Python 2 only.
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "mission_rank": sorted(result.iteritems(), key=lambda d: d[1], reverse=True)}) # Sort directory sequence
@tcm_rank.route('/api/all_rank_by_point', methods=['POST'])
@multiauth.login_required
def all_ecg_rank():
    """Ranking of every user by points accumulated across all missions."""
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        result = {}
        rv = runMySQL.sqlqueryall(
            '''SELECT user_id, task_based_points, task_multiplier, bonus, extra_bonus FROM points_bonus_view ORDER BY task_based_points DESC''')
        for item in rv:
            # total = (base + bonus) * multiplier + extra_bonus, summed per user
            points = (item[1] + item[3]) * item[2] + item[4]
            # NOTE: dict.has_key()/iteritems() are Python 2 only.
            if result.has_key(item[0]):
                result[item[0]] = result[item[0]] + int(points)
            else:
                result[item[0]] = int(points)
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200),
                        "all_rank": sorted(result.iteritems(), key=lambda d: d[1], reverse=True)})
@tcm_rank.route('/api/user_in_mission_rank', methods=['POST'])
@multiauth.login_required
def tcm_ecg_mission_rank():
    """Return the caller's percentile rank within the current mission."""
    user_account = request.json.get('userid')
    mission_number = mysqlhandler.Task_idHandler(mysql).get_task_id()
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        rv = runMySQL.sqlquery('''SELECT COUNT(DISTINCT(user_id)) FROM points_bonus_view WHERE task_id=%s''' % mission_number) # calculate all users
        # SECURITY NOTE(review): user_account is request-supplied and is
        # interpolated into the SQL string -- SQL injection risk; switch to
        # parameterized queries if the MySQLHandler wrapper supports them.
        rv1 = runMySQL.sqlquery('''SELECT COUNT(a.user_id) FROM points_bonus_view a WHERE a.task_id=%s and ((a.task_based_points+a.bonus)*a.task_multiplier+a.extra_bonus)>
                    (SELECT ((b.task_based_points+b.bonus)*b.task_multiplier+b.extra_bonus) FROM points_bonus_view b WHERE b.user_id=%s and b.task_id=%s)'''
                               % (mission_number, user_account, mission_number))
        # rv1[0] = number of users strictly ahead of the caller.
        if rv1 is None or rv1[0] == 0 :
            return jsonify({"errcode": 704, "respmsg": errHandler.get_errmsg(704)})
        try:
            user_in_mission_rank = "%.f%%" % (float(rv1[0]) / float(rv[0]) * 100)
        except ValueError:
            return jsonify({"errcode": 704, "respmsg": errHandler.get_errmsg(704)})
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "user_in_mission_rank": user_in_mission_rank})
@tcm_rank.route('/api/user_in_all_rank', methods=['POST'])
@multiauth.login_required
def tcm_ecg_all_rank():
    """Return the caller's percentile rank across all missions."""
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        rv = runMySQL.sqlquery('''SELECT COUNT(DISTINCT(user_id)) FROM points_bonus_view''') # calculate all users
        # SECURITY NOTE(review): user_account is request-supplied and is
        # interpolated into the SQL string -- SQL injection risk.
        rv1 = runMySQL.sqlquery('''SELECT COUNT(user_id) FROM points_bonus_view a WHERE a.total_points>(SELECT SUM(b.total_points) FROM points_bonus_view b WHERE
                    b.user_id=%s) GROUP BY user_id''' % user_account)
        # NOTE(review): ``rv1 == 0`` compares a row tuple against 0 and is
        # therefore always False; the sibling mission-rank route checks
        # ``rv1[0] == 0`` instead -- confirm which behaviour is intended.
        if rv1 is None or rv1 == 0:
            return jsonify({"errcode": 400, "respmsg": errHandler.get_errmsg(400)})
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "user_in_all_rank": '%.f%%' % (float(rv1[0]) / float(rv[0]) * 100)})
def get_x1_timestamp(user_id, mission_table):
    """Return the next X1 (ECG) record date to process for *user_id*.

    Looks up the last processed x1_timestamp in *mission_table*; when absent
    it scans from the current mission's start date, otherwise from the stored
    timestamp.  Returns the found record_date, or False when there is none.
    """
    mission_number = mysqlhandler.Task_idHandler(mysql).get_task_id()
    # Get existed x1 timestamp
    rv = runMySQL.sqlquery('''SELECT x1_timestamp from %s WHERE user_id='%s' ''' % (mission_table, user_id))
    # Get default mission_start and mission end date
    # date_rv = runMySQL.sqlquery('''SELECT mission_start_date, mission_end_date from mission_date WHERE mission_number=%s ''' % mission_number)
    # mission_start_date, mission_end_date = date_rv[0], date_rv[1]
    mission_start_date, mission_end_date = tcmPointHandler.get_mission_date(mission_number)
    if rv is None or rv[0] is None:
        # No checkpoint yet: take the first record inside the mission window.
        record_date_rv = runMySQL.sqlquery('''SELECT record_date FROM task_ecg_records WHERE (record_date between %s and %s) and (user_id='%s') Order By record_date limit 1 '''
                                           % (mission_start_date, mission_end_date, user_id))
        if record_date_rv is None:
            return False
        else:
            return record_date_rv[0]
    else:
        # Resume from the stored checkpoint up to the mission end date.
        record_date_rv = runMySQL.sqlquery('''SELECT record_date FROM task_ecg_records WHERE (record_date between DATE('%s') and %s) and (user_id='%s') Order By record_date limit 1 '''
                                           % (rv[0], mission_end_date, user_id))
        if record_date_rv is None:
            return False
        else:
            return record_date_rv[0]
def get_s1_timestamp(user_id):
    """Return the next S1 (wearable) record timestamp to process for *user_id*.

    Same checkpointing pattern as get_x1_timestamp, but against
    hw_wear_records and using UNIX timestamps; returns False when none found.
    """
    mission_number = mysqlhandler.Task_idHandler(mysql).get_task_id()
    rv = runMySQL.sqlquery('''SELECT timestamp FROM hw_wear_records WHERE user_id = %s AND task_id = %s'''%(user_id,mission_number))
    date_rv = runMySQL.sqlquery('''SELECT UNIX_TIMESTAMP(mission_start_date), UNIX_TIMESTAMP(mission_end_date) FROM mission_date WHERE mission_number=%s ''' % mission_number)
    mission_start_date, mission_end_date = date_rv[0], date_rv[1]
    if rv is None or rv[0] is None:
        # No checkpoint yet: first record inside the mission window.
        record_date_rv = runMySQL.sqlquery('''SELECT timestamp FROM hw_wear_records WHERE (timestamp between %s and %s) and user_id='%s' Order By timestamp limit 1 '''
                                           % (mission_start_date, mission_end_date, user_id))
        if record_date_rv is None:
            return False
        else:
            return record_date_rv[0]
    else:
        # Resume from the stored checkpoint up to mission end.
        record_date_rv = runMySQL.sqlquery('''SELECT timestamp FROM hw_wear_records WHERE (timestamp between %s and %s) and user_id='%s' Order By timestamp limit 1 '''
                                           % (rv[0], mission_end_date, user_id))
        if record_date_rv is None:
            return False
        else:
            return record_date_rv[0]
def get_ques_timestamp(user_id, mission_table):
    """Return the next questionnaire record date to process for *user_id*.

    Same checkpointing pattern as get_x1_timestamp, but against the
    questionnaires table; returns False when no record is found.
    """
    mission_number = mysqlhandler.Task_idHandler(mysql).get_task_id()
    # Get existed s1 timestamp
    rv = runMySQL.sqlquery('''SELECT ques_timestamp from %s WHERE user_id=%s ''' % (mission_table, user_id))
    # Get default mission_start and mission end date
    date_rv = runMySQL.sqlquery('''SELECT mission_start_date, mission_end_date FROM mission_date WHERE mission_number=%s ''' % mission_number)
    mission_start_date, mission_end_date = date_rv[0], date_rv[1]
    if rv is None or rv[0] is None:
        # No checkpoint yet: first questionnaire inside the mission window.
        record_date_rv = runMySQL.sqlquery('''SELECT record_date FROM questionnaires WHERE (record_date between %s and %s) and user_id='%s' Order By record_date limit 1 '''
                                           % (mission_start_date, mission_end_date, user_id))
        if record_date_rv is None:
            return False
        else:
            return record_date_rv[0]
    else:
        # Resume from the stored checkpoint up to mission end.
        record_date_rv = runMySQL.sqlquery('''SELECT record_date FROM questionnaires WHERE (record_date between DATE('%s') and %s) and user_id='%s' Order By record_date limit 1 '''
                                           % (rv[0], mission_end_date, user_id))
        if record_date_rv is None:
            return False
        else:
            return record_date_rv[0]
@tcm_rank.route('/api/total_points', methods=['POST'])
@multiauth.login_required
def get_total_point():
    """Return the caller's summed points across all missions."""
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        # SECURITY NOTE(review): user_account interpolated into SQL -- injection risk.
        rv = runMySQL.sqlquery('''SELECT SUM(total_points) FROM points_bonus_view WHERE user_id=%s ''' % user_account)
        if rv is None:
            return jsonify({"errcode": 400, "respmsg": errHandler.get_errmsg(400)})
        elif rv[0] == 0:
            # Zero points is reported as a dedicated error code.
            return jsonify({"errcode": 701, "respmsg": errHandler.get_errmsg(701)})
        else:
            # NOTE(review): SUM() yields NULL (rv[0] is None) when the user has
            # no rows; that case falls through here and returns None -- confirm.
            return jsonify({"errcode": 200, "total_points": rv[0]})
@tcm_rank.route('/api/total_users', methods=['POST'])
@multiauth.login_required
def tcm_total_user():
    """Return the total number of distinct users holding any points."""
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        # Token/userid mismatch: propagate the auth error response.
        return status
    else:
        rv = runMySQL.sqlquery('''SELECT COUNT(DISTINCT(user_id)) FROM points_bonus_view''')
        if rv is None:
            return jsonify({"errcode": 400, "respmsg": errHandler.get_errmsg(400)})
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "total_users": rv[0]})
@tcm_rank.route('/api/city_in_all_rank', methods=['POST'])
@multiauth.login_required
def allPointRank_ByCity():
    """Return the top (up to) three cities of residence by user count."""
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        rv = runMySQL.sqlquery('''SELECT residence, COUNT(*) AS rank FROM user_profile GROUP BY residence ORDER BY rank DESC LIMIT 3''', num=3)
        if rv is None:
            return jsonify({"errcode": 400, "respmsg": errHandler.get_errmsg(400)})
        result = {}
        # BUG FIX: iterate the rows actually returned instead of assuming
        # exactly three exist -- the previous ``for i in range(0, 3)`` loop
        # raised IndexError (-> HTTP 500) whenever fewer than three distinct
        # cities were present in user_profile.
        for residence, count in rv:
            result[residence] = count
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "top_three_city_rank": result})
# Return the mission progress till now
def get_mission_progress(mission_number):
    """Return mission progress as a percentage string (e.g. "33%"), or
    False when the stored procedure yields no usable number."""
    rv = runMySQL.sqlquery('''CALL sp_mission_progress("%s")''' % (mission_number))
    try:
        # NOTE(review): the divisor 3 disagrees with the trailing comment
        # ("One mission is 14 days") -- confirm whether sp_mission_progress
        # reports thirds of a mission or elapsed days.
        mission_progress = "%.0f%%" % (float(rv[0]) / float(3) * 100) # One mission is 14 days
        return mission_progress
    except TypeError:
        return False
    except ValueError:
        return False
# Return the number of ecg data which uploaded by users
def get_difference_makers(user_account, mission_start_date, mission_end_date):
    """Count type-11 ECG raw-data uploads for *user_account* in the mission
    window (timestamps are UNIX seconds).  Returns 0 when there are none.

    The previous S3-key-scanning implementation was left behind as
    commented-out dead code and has been removed.
    """
    # SECURITY NOTE(review): arguments interpolated into SQL -- injection
    # risk if any of them is request-supplied; prefer parameterized queries.
    rv = runMySQL.sqlquery('''SELECT COUNT(rawdata_s3) FROM tcm_raw_data WHERE user_id=%s and %s>=UNIX_TIMESTAMP(timestamp) and UNIX_TIMESTAMP(timestamp)>=%s and type=11 ''' % (
        user_account, mission_end_date, mission_start_date))
    if rv is None:
        return 0
    return rv[0]
# Return the number of users completed the mission
def get_total_finishers(task_id=None):
    """Count users that completed a mission; all missions when task_id is None."""
    if task_id is None:
        query = '''SELECT SUM(task_completed_tag) FROM mission_points_summary'''
    else:
        query = '''SELECT SUM(task_completed_tag) FROM mission_points_summary WHERE task_id=%s ''' % task_id
    row = runMySQL.sqlquery(query)
    # SUM() yields NULL on an empty table; treat both cases as zero.
    if row is None or row[0] is None:
        return 0
    return row[0]
# Return all ecg record time till now
def get_experiment_progress(user_account, task_id=None):
    """Sum of ECG record_time for the user (all tasks, or one task_id).

    NOTE(review): callers divide the result by 60, so record_time is
    presumably stored in minutes -- confirm against the schema.
    """
    if task_id is None:
        rv = runMySQL.sqlquery('''SELECT SUM(record_time) FROM task_ecg_records WHERE user_id=%s ''' % user_account)
    else:
        rv = runMySQL.sqlquery('''SELECT SUM(record_time) FROM task_ecg_records WHERE user_id=%s and task_id=%s ''' % (user_account,task_id))
    # SUM() yields NULL when there are no rows.
    if rv[0] is None:
        return 0
    return rv[0]
# Return the sum of all sleep time
def get_sleep_collected(user_account, mission_start_date, mission_end_date):
    """Total hours of sleep recorded for the user inside the mission window
    (window bounds are UNIX timestamps); 0 when there are no records."""
    rv = runMySQL.sqlquery('''SELECT SUM(TIMESTAMPDIFF(HOUR, sleep_start, sleep_end)) FROM sleepreport_records WHERE user_id=%s and %s>=UNIX_TIMESTAMP(timestamp) and UNIX_TIMESTAMP(
                           timestamp)>=%s ''' % (user_account, mission_end_date, mission_start_date))
    if rv[0] is None:
        return 0
    return rv[0]
# Return the number of heartbeats in the latest RR raw data
def get_heartbeats_collected(user_account, mission_start_date, mission_end_date):
    """Count lines in the most recent type-7 (RR interval) raw-data file.

    Fetches the newest S3 key inside the mission window, downloads it and
    counts newline-separated entries; 0 when no such record exists.
    """
    rv = runMySQL.sqlquery('''SELECT rawdata_s3 FROM tcm_raw_data WHERE user_id=%s and type=7 and %s>=UNIX_TIMESTAMP(timestamp) and UNIX_TIMESTAMP(timestamp)>=%s ORDER BY starttime DESC
                           limit 1 ''' % (user_account, mission_end_date, mission_start_date))
    if rv is None:
        return 0
    result = aws_s3Handler.get_rawdata(rv[0])
    # One heartbeat entry per line in the raw file.
    return (len(result.split('\n')))
def get_steps(user_account):
    """Placeholder for the step-count metric.

    TODO: implement real step retrieval; currently always returns "TBD".
    """
    placeholder = "TBD"
    return placeholder
# Return total number of symptoms input
def get_total_symptoms_input(user_account):
    """Count all symptom records the user has ever entered."""
    rv = runMySQL.sqlquery('''SELECT COUNT(*) FROM symptom_records WHERE user_id=%s ''' % user_account)
    return rv[0]
# Return total number of questionnaires answered
def get_total_questionnaires_answered(user_acccount):
    """Count all questionnaires the user has answered.

    NOTE(review): parameter name has a typo ("user_acccount"); left as is to
    avoid breaking any keyword-argument callers.
    """
    rv = runMySQL.sqlquery('''SELECT COUNT(*) FROM questionnaires WHERE user_id=%s ''' % user_acccount)
    return rv[0]
# Return total number of hw wear time
def get_s1_worn_time(user_account):
    """Total wearable (S1) wear_time for the user; None-propagating SUM."""
    rv = runMySQL.sqlquery('''SELECT SUM(wear_time) FROM hw_wear_records WHERE user_id=%s ''' % user_account)
    return rv[0]
@tcm_rank.route('/api/load_lab_page', methods=['POST'])
@multiauth.login_required
def load_lab_page():
    """Aggregate all lab-page metrics for the caller into one response."""
    user_account = request.json.get('userid')
    mission_number = mysqlhandler.Task_idHandler(mysql).get_task_id()
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        rv = runMySQL.sqlquery('''SELECT mission_start_date, mission_end_date FROM mission_date WHERE mission_number=%s ''' %mission_number)
        mission_start_date = rv[0]
        mission_end_date = rv[1]
        try:
            # Each helper hits the database independently; any None result
            # used in arithmetic below surfaces as TypeError -> errcode 704.
            mission_progress = get_mission_progress(mission_number)
            different_makers = get_difference_makers(user_account, mission_start_date, mission_end_date)
            total_finisher = get_total_finishers(mission_number)
            experiment_progress = "%.2f%%" % (float(get_experiment_progress(user_account, mission_number)) / float(6048 * 60)) # Experiment will last 252 days, 252 days = 6048 hours
            sleep_collected = int(get_sleep_collected(user_account, mission_start_date, mission_end_date))
            heartbeats_collected = get_heartbeats_collected(user_account, mission_start_date, mission_end_date)
            steps = get_steps(user_account)
            X1_time = int(get_experiment_progress(user_account) / 60) # get_experiment_progress return is minute, X1_time is hour
            S1_time = get_s1_worn_time(user_account)
            total_questionnaire_answer = get_total_questionnaires_answered(user_account)
            total_symptom_inputs = get_total_symptoms_input(user_account)
        except TypeError:
            return jsonify({"errcode": 704, "respmsg": errHandler.get_errmsg(704)})
        # get_mission_progress returns False (not an exception) on failure.
        if mission_progress is not False:
            return jsonify({"errcode": 200,
                            "lab_page_content": {"Mission_progress": mission_progress, "UploadedData_different_makers": different_makers, "Total_mission_completers": total_finisher,
                                                 "Experiment_progress": experiment_progress,
                                                 "Sleep_collected": sleep_collected, "Heartbeats_collected": heartbeats_collected, "Steps": steps, "X1": X1_time, "S1": S1_time,
                                                 "Questionnaire_Survey": total_questionnaire_answer,
                                                 "Symptom_inputs": total_symptom_inputs}})
        elif mission_progress is False:
            return jsonify({"errcode": 705, "respmsg": errHandler.get_errmsg(705)})
        else:
            return jsonify({"errcode": 400, "respmsg": errHandler.get_errmsg(400)})
@tcm_rank.route('/api/get_points', methods=["POST"])
@multiauth.login_required
def get_points():
    """Compute the caller's grand-total points plus a per-task breakdown.

    Formula per task: (base + bonus + wear_minutes) * multiplier + extra_bonus.
    SECURITY NOTE(review): user_account is interpolated into several SQL
    strings -- injection risk; prefer parameterized queries.
    """
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        total_points = 0
        # 1. Get bonus, extra_bonus
        rv = runMySQL.sqlquery('''SELECT bonus, extra_bonus FROM level_bonus_mapping WHERE user_id=%s ''' % user_account)
        bonus = rv[0]
        extra_bonus = rv[1]
        # 2. Get based points
        base_rs = runMySQL.sqlqueryall('''SELECT task_based_points, task_multiplier, task_id FROM mission_points_summary WHERE user_id=%s ''' % user_account)
        if not base_rs:
            return jsonify({"errcode": 400, "respmsg": errHandler.get_errmsg(400)})
        # 3. Get hw wearing time
        wearing_rs = runMySQL.sqlquery('''SELECT SUM(wear_time) FROM hw_wear_records WHERE user_id=%s ''' % user_account)
        # For test to display detail info
        based_points = {}
        multiplier = {}
        local_server_time = time.strftime("%Y-%m-%d", time.localtime())
        # Find the mission window containing "today", then the user's ECG
        # minutes inside that window (reported as test_detail_info only).
        rr = runMySQL.sqlquery('''SELECT mission_number, mission_start_date, mission_end_date FROM mission_date WHERE UNIX_TIMESTAMP(mission_end_date)>=UNIX_TIMESTAMP('%s') AND
                    UNIX_TIMESTAMP('%s')>=UNIX_TIMESTAMP(mission_start_date) ''' % (local_server_time, local_server_time))
        rr1 = runMySQL.sqlquery('''SELECT SUM(record_time) FROM task_ecg_records WHERE user_id=%s and UNIX_TIMESTAMP(timestamp)>=UNIX_TIMESTAMP('%s') and UNIX_TIMESTAMP(
                    '%s')>=UNIX_TIMESTAMP(timestamp)''' % (user_account, rr[1], rr[2]))
        if rr1[0] is None:
            rr_value = 0
        else:
            rr_value = int(rr1[0])
        for i in base_rs:
            # For test to display detail info
            based_points.setdefault(i[2], i[0])
            multiplier.setdefault(i[2], i[1])
            total_points = total_points + (i[0] + bonus + int(wearing_rs[0]/60)) * i[1] + extra_bonus
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "points": total_points, "test_detail_info":{"ecg_hours": rr_value, "bonus": bonus, "extra_bonus": extra_bonus,
                    "task_based_points": based_points, "multiplier": multiplier,
                    "hwwear_time": int(wearing_rs[0])/60}})
@tcm_rank.route('/api/sync_mission_date', methods=['POST'])
@multiauth.login_required
def sync_mission_date():
    """Return the unix timestamp of the earliest mission's start date."""
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    auth_error = models.check_userid_and_token(user_account, header_token)
    if auth_error:
        return auth_error
    first_row = runMySQL.sqlquery('''SELECT UNIX_TIMESTAMP(mission_start_date) FROM mission_date ORDER BY mission_number ASC limit 1 ''')
    return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "first_mission_start_date": first_row[0]})
@tcm_rank.route('/api/get_tutorial_status', methods=['POST'])
@multiauth.login_required
def get_tutorial_status():
    """Return the caller's tutorial progress.

    Responses:
      * no row / NULL tag   -> all-zero defaults (tutorial never started)
      * completed_tag == 0  -> the stored required/kit/avatar/ques values
      * completed_tag != 0  -> errcode 901 (tutorial already completed)
    """
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    rv = runMySQL.sqlquery('''SELECT completed_tag FROM tutorial_status WHERE userid = %s''' %user_account)
    if (rv is None) or (rv[0] is None):
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "required":0, "kit":0, "avatar":0, "ques":0})
    elif rv[0] == 0:
        # BUG FIX: the original compared the whole row tuple to 0 (rv==0),
        # which is never true, so this branch was unreachable and every
        # in-progress user got errcode 901.
        rv = runMySQL.sqlquery('''SELECT required, kit, avatar, intro_ques FROM tutorial_status WHERE userid = %s''' %user_account)
        #for kit 1=both, 2=only x1, 3=only s1, for avatar 1=jeff, 2=lisa
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "required": rv[0], "kit":rv[1], "avatar":rv[2], "ques":rv[3]})
    else:
        return jsonify({"errcode": 901, "respmsg": errHandler.get_errmsg(901)})
@tcm_rank.route('/api/upload_tutorial_status', methods=['POST'])
@multiauth.login_required
def upload_tutorial_status():
    """Insert or update the caller's tutorial progress row.

    Body: {"userid": ..., "tutorial": {...}} where 'ques' doubles as the
    completed tag.
    """
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    rr = request.json.get('tutorial')
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    rv = runMySQL.sqlquery('''SELECT completed_tag FROM tutorial_status WHERE userid = %s''' %user_account)
    tag = rr['ques']
    # NOTE(review): rr is read both as a mapping (rr['ques']) and by integer
    # index (rr[0]..rr[3]); a JSON object only has string keys, so the integer
    # indexing presumably relies on keys "0".."3" -- confirm the client
    # payload.  Also the SELECTs filter on column `userid` while the
    # INSERT/UPDATE use `user_id`; verify the real column name.
    if (rv is None) or (rv[0] is None):
        # BUG FIX: the original INSERT carried an invalid `WHERE user_id=%s`
        # clause (INSERT statements take no WHERE); the user id is now stored
        # as a regular column instead.
        #for kit 1=both, 2=only x1, 3=only s1, for avatar 1=jeff, 2=lisa
        runMySQL.sqlquery(''' INSERT INTO tutorial_status (user_id, required, kit, avatar, intro_ques, completed_tag) VALUES (%s,%s,%s,%s,%s,%s)'''%(user_account, rr[0], rr[1], rr[2], rr[3], tag),sqlcommit=1)
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200)})
    else:
        #for kit 1=both, 2=only x1, 3=only s1, for avatar 1=jeff, 2=lisa
        runMySQL.sqlquery(''' UPDATE tutorial_status SET required=%s, kit=%s, avatar=%s, intro_ques=%s, completed_tag=%s WHERE user_id=%s '''%(rr[0], rr[1], rr[2], rr[3], tag, user_account),sqlcommit=1)
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200)})
def get_research_heros(mission_number):
    """Count the distinct users who have a points-summary row for this mission."""
    row = runMySQL.sqlquery('''SELECT COUNT(DISTINCT(user_id)) FROM mission_points_summary WHERE task_id=%s'''%mission_number)
    return row[0]
def get_ecg_wearing():
    """Count distinct users with an ECG record in the last 3 hours (10800 s)."""
    row = runMySQL.sqlquery('''SELECT COUNT(DISTINCT(user_id)) FROM task_ecg_records WHERE (%s-UNIX_TIMESTAMP(timestamp))<10800'''%time.time())
    return row[0]
def get_wristband_wearing():
    """Count distinct users whose wristband reported within the last 3 hours."""
    row = runMySQL.sqlquery('''SELECT COUNT(DISTINCT(user_id)) FROM hw_wear_records WHERE (%s-timestamp)<10800'''%time.time())
    return row[0]
def get_user_rank(user_account,mission_number):
    """Percentile rank of *user_account* in a mission (1 = best, up to 100).

    rank = (#users with strictly more points / #all users) * 100, truncated,
    plus 1; returns 100 when the mission has no participants at all.
    """
    rv = runMySQL.sqlquery('''SELECT COUNT(DISTINCT(user_id)) FROM points_bonus_view WHERE task_id=%s''' % mission_number) # calculate all users
    # Users strictly ahead of this user (same points formula on both sides).
    rv1 = runMySQL.sqlquery('''SELECT COUNT(a.user_id) FROM points_bonus_view a WHERE a.task_id=%s and (a.task_based_points*a.task_multiplier+a.bonus+a.extra_bonus)>
    (SELECT (b.task_based_points*b.task_multiplier+b.bonus+b.extra_bonus) FROM points_bonus_view b WHERE b.user_id=%s and b.task_id=%s)'''
                      % (mission_number, user_account, mission_number))
    if rv[0] == 0:
        return 100
    else:
        rank = int(float(rv1[0]) / float(rv[0]) * 100)+1
        return rank
def get_total_points(mission_number):
    """Sum total_points over every user of a mission; 0 when there are no rows."""
    row = runMySQL.sqlquery('''SELECT SUM(total_points) FROM points_bonus_view WHERE task_id=%s''' %mission_number)
    if row is None or row[0] is None:
        return 0
    return row[0]
@tcm_rank.route('/api/ended_mission_data', methods=['POST'])
@multiauth.login_required
def ended_mission_data():
    """Summarise every already-ended mission for the caller.

    For each mission before the current one, returns the number of
    finishers, the mission's total points and the caller's percentile
    rank.  Per-mission totals are computed once and cached in the
    ended_mission_summary table.
    """
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    # Current mission id; range(1, mission_number) below covers past missions.
    mission_number = mysqlhandler.Task_idHandler(mysql).get_task_id()
    if status:
        return status
    else:
        result = []
        for task_id in range(1,mission_number):
            user_in_mission_rank = get_user_rank(user_account,task_id)
            rv = runMySQL.sqlquery('''SELECT total_finisher, total_points FROM ended_mission_summary WHERE mission_number=%s'''%task_id)
            if (rv is None) or (rv[0] is None):
                # Not cached yet: compute once and persist the summary row.
                # NOTE(review): get_total_finishers is defined elsewhere in
                # this module -- confirm it exists.
                finished = get_total_finishers(task_id)
                total_points = get_total_points(task_id)
                runMySQL.sqlquery('''INSERT INTO ended_mission_summary(mission_number,total_finisher,total_points) VALUES(%s,%s,%s)''' %(task_id,finished,total_points),sqlcommit=1)
            else:
                finished = rv[0]
                total_points = rv[1]
            result.append({"mission_number":task_id,"total_finisher":finished, "total_points":total_points, "rank":user_in_mission_rank})
        return jsonify({"errcode": 200, "result":result})
@tcm_rank.route('/api/test_data', methods=['POST'])
def test_data():
    """Debug endpoint: dump per-user mission stats for a hard-coded cohort.

    NOTE(review): unlike every other endpoint here this route has no
    @multiauth.login_required decorator and interpolates request data
    straight into SQL -- confirm it is intentionally open / disabled
    outside of testing.
    """
    mission_number = request.json.get('mission_number')
    rv = runMySQL.sqlquery('''SELECT mission_start_date,mission_end_date FROM mission_date WHERE mission_number=%s'''%mission_number)
    mission_start_date = rv[0]
    mission_end_date = rv[1]
    # Hard-coded list of test-account ids (phone numbers).
    users=('0911912347','0918616828','0919015376','0921631750','0930848267','0922708977','0989699417','0928126552','0928798876','0952153329','0953940220','0936637549','13515101248','13705157910','13851413464','13911352002','15005147184','15396766148','15518782601','15715167336','15805170162','15895889016','15938734803','17714302084','18061250061','18248790604','18651899190')
    result=[]
    for user_account in users:
        # ECG record time scaled by 1/9.6 (units unclear from here --
        # presumably a per-hour conversion; confirm against the recorder).
        rv = runMySQL.sqlquery('''SELECT SUM(record_time) FROM task_ecg_records WHERE user_id=%s AND task_id=%s'''%(user_account,mission_number))
        if (rv is None) or (rv[0] is None):
            ecg = 0
        else:
            ecg = float(rv[0])/9.6
        # wear_time / 24 -- presumably hours converted to days; confirm.
        rs = runMySQL.sqlquery('''SELECT SUM(wear_time) FROM hw_wear_records WHERE user_id=%s AND task_id=%s'''%(user_account,mission_number))
        if (rs is None) or (rs[0] is None):
            wristband = 0
        else:
            wristband = float(rs[0])/24
        must_ques = runMySQL.sqlquery('''SELECT must_completed_ques FROM mission_points_summary WHERE user_id=%s AND task_id=%s'''%(user_account,mission_number))
        questionaries = runMySQL.sqlquery('''SELECT count(user_id) FROM questionnaires WHERE user_id=%s AND task_id= %s''' %(user_account,mission_number))
        symptoms = runMySQL.sqlquery('''SELECT COUNT(user_id) FROM symptom_records WHERE user_id=%s AND (timestamp BETWEEN '%s' AND '%s')'''%(user_account,mission_start_date,mission_end_date))
        result.append({"user":user_account,"ecg":ecg,"wristband":wristband,"must_ques":must_ques,"questionaries":questionaries,"symptoms":symptoms})
    return jsonify({"result":result})
@tcm_rank.route('/api/new_load_lab_page', methods=['POST'])
@multiauth.login_required
def new_load_lab_page():
    """Return the lab-page dashboard numbers for the current mission.

    Mission-wide aggregates are recomputed at most once every 30 minutes
    (1800 s) and cached in the mission_summary table; otherwise the most
    recent cached row is served.  The caller's own rank is always
    computed fresh.
    """
    user_account = request.json.get('userid')
    mission_number = mysqlhandler.Task_idHandler(mysql).get_task_id()
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        last_sync_time = runMySQL.sqlquery('''SELECT MAX(timestamp) FROM mission_summary WHERE mission_number=%s''' %mission_number)
        local_time = int(time.time())
        # Refresh when there is no cached row yet or it is older than 30 min.
        if (last_sync_time is None) or (last_sync_time[0] is None) or (local_time-last_sync_time[0])>1800:
            mission_progress = get_mission_progress(mission_number)
            research_heros = get_research_heros(mission_number)
            finished = get_total_finishers(mission_number)
            ecg = get_ecg_wearing()
            wristband = get_wristband_wearing()
            user_in_mission_rank = get_user_rank(user_account,mission_number)
            points = get_total_points(mission_number)
            runMySQL.sqlquery('''INSERT INTO mission_summary(mission_number,research_heros,finished,ecg,wristband,points,timestamp) VALUES(%s,%s,%s,%s,%s,%s,%s)'''
                              %(mission_number,research_heros,finished,ecg,wristband,points,local_time),sqlcommit=1)
        else:
            # Serve the newest cached aggregates for this mission.
            rv = runMySQL.sqlquery('''SELECT research_heros,finished,ecg,wristband,points FROM mission_summary WHERE timestamp=(select max(timestamp) FROM mission_summary WHERE mission_number=%s)''' %mission_number)
            mission_progress = get_mission_progress(mission_number)
            research_heros = rv[0]
            finished = rv[1]
            ecg = rv[2]
            wristband = rv[3]
            user_in_mission_rank = get_user_rank(user_account,mission_number)
            points = rv[4]
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "mission_progress":mission_progress,
                        "research_heros":research_heros, "people_finished":finished, "wearing_ecg":ecg, "wearing_wristband":wristband,
                        "user_rank":user_in_mission_rank, "points":points})
@tcm_rank.route('/api/get_unlockable', methods=['POST'])
@multiauth.login_required
def get_unlockable():
    """Return the user's unlockable-item state, creating a zeroed row on first use."""
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    auth_error = models.check_userid_and_token(user_account, header_token)
    if auth_error:
        return auth_error
    row = runMySQL.sqlquery('''SELECT unlocked,special_unlocked,now_wearing FROM unlockable WHERE user_id=%s'''%user_account)
    if row is None:
        # First visit: persist a row with everything locked.
        runMySQL.sqlquery('''INSERT INTO unlockable(user_id,unlocked,special_unlocked,now_wearing) VALUES(%s,0,0,0)'''%user_account,sqlcommit=1)
        unlocked, special_unlocked, now_wearing = '0', '0', '0'
    else:
        unlocked, special_unlocked, now_wearing = row[0], row[1], row[2]
    return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200),"unlocked":unlocked,"special_unlocked":special_unlocked,"now_wearing":now_wearing})
@tcm_rank.route('/api/update_unlockable', methods=['POST'])
@multiauth.login_required
def update_unlockable():
    """Overwrite the caller's unlockable-item state with the posted values.

    Body: {"userid", "unlocked", "special_unlocked", "now_wearing"}.
    Silently does nothing if the user has no unlockable row yet
    (rows are created by get_unlockable).
    """
    user_account = request.json.get('userid')
    unlocked = request.json.get('unlocked')
    special_unlocked = request.json.get('special_unlocked')
    now_wearing = request.json.get('now_wearing')
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        runMySQL.sqlquery('''UPDATE unlockable SET unlocked = %s, special_unlocked = %s,
     now_wearing = %s WHERE user_id = %s'''%(unlocked,special_unlocked,now_wearing,user_account),sqlcommit=1)
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200)})
@tcm_rank.route('/api/mission_completion', methods=['POST'])
@multiauth.login_required
def mission_completion():
    """List the per-mission completion tags for every past mission.

    Also returns the user's current multiplier derived from the sum of
    all task_completed_tag values.
    """
    user_account = request.json.get('userid')
    header_token = request.headers.get('Authorization').split(' ')[1]
    # Current mission id; past missions are 1..mission_number-1.
    mission_number = mysqlhandler.Task_idHandler(mysql).get_task_id()
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        return status
    else:
        result=[]
        rs = runMySQL.sqlquery('''SELECT SUM(task_completed_tag) FROM mission_points_summary WHERE user_id=%s'''%user_account)
        # NOTE(review): SUM() is NULL when the user has no summary rows;
        # confirm mapping_flask_multipliers copes with a None argument.
        cur_multiplier=tcmPointHandler.mapping_flask_multipliers(rs[0])
        for task_id in range(1,mission_number):
            rv=runMySQL.sqlquery('''SELECT task_completed_tag FROM mission_points_summary WHERE user_id=%s AND task_id=%s'''%(user_account,task_id))
            if rv is None:
                # No summary row for that mission: report it as not completed.
                rv=[0]
            result.append(rv[0])
return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200),"cur_multiplier":cur_multiplier,"mission_completion":result}) |
mindnervestech/mnrp | refs/heads/master | addons/base_import/tests/__init__.py | 179 | from . import test_cases
checks = [test_cases]
|
oihane/odoo | refs/heads/8.0 | addons/l10n_ch/account_wizard.py | 424 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# Translation contributors: brain-tec AG, Agile Business Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import TransientModel
class WizardMultiChartsAccounts(TransientModel):
    """Extends the chart-setup wizard for the Swiss (Sterchi) template."""

    _inherit = 'wizard.multi.charts.accounts'

    def onchange_chart_template_id(self, cursor, uid, ids, chart_template_id=False, context=None):
        """Force code_digits to 0 when the Swiss chart template is selected."""
        if context is None:
            context = {}
        res = super(WizardMultiChartsAccounts, self).onchange_chart_template_id(
            cursor, uid, ids, chart_template_id=chart_template_id, context=context)
        # 0 is evaluated as False in python so we have to do this
        # because original wizard test code_digits value on a float widget
        if chart_template_id:
            sterchi_template = self.pool.get('ir.model.data').get_object(
                cursor, uid, 'l10n_ch', 'l10nch_chart_template')
            if sterchi_template.id == chart_template_id:
                res['value']['code_digits'] = 0
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
caioserra/apiAdwords | refs/heads/master | examples/adspygoogle/dfp/v201311/team_service/update_teams.py | 2 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates teams by adding an ad unit to the first 5.
To determine which teams exist, run get_all_teams.py. To determine which ad
units exist, run get_all_ad_units.py
Tags: TeamService.getTeamsByStatement, TeamService.updateTeams
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
# Initialize appropriate service.
team_service = client.GetService('TeamService', version='v201311')
# Set the ID of the ad unit to add to the teams.
ad_unit_id = 'INSERT_AD_UNIT_ID_HERE'
# Create a statement to select first 5 teams that aren't built-in.
# NOTE(review): presumably built-in teams have non-positive ids, which is
# what `id > 0` filters out -- confirm against the DFP docs.
filter_statement = {'query': 'WHERE id > 0 LIMIT 5'}
# Get teams by statement.
response = team_service.GetTeamsByStatement(filter_statement)[0]
teams = []
if 'results' in response:
    teams = response['results']
if teams:
    # Update each local team object by adding the ad unit to it.
    for team in teams:
        ad_unit_ids = []
        if 'adUnitIds' in team:
            ad_unit_ids = team['adUnitIds']
        # Don't add the ad unit if the team has all inventory already.
        if not Utils.BoolTypeConvert(team['hasAllInventory']):
            ad_unit_ids.append(ad_unit_id)
        team['adUnitIds'] = ad_unit_ids
    # Update teams on the server.
    teams = team_service.UpdateTeams(teams)
    # Display results.
    if teams:
        for team in teams:
            print ('Team with id \'%s\' and name \'%s\' was updated.'
                   % (team['id'], team['name']))
    else:
        print 'No teams were updated.'
else:
    print 'No teams found to update.'
|
akretion/multi-company | refs/heads/8.0 | __unported__/product_autocompany/__openerp__.py | 1 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Product automatic company',
'version' : '1.0',
"author" : "Savoir-faire Linux",
"website" : "http://www.savoirfairelinux.com",
'license': 'AGPL-3',
'category' : 'Sales',
'depends' : ['product', 'stock'],
'description': """
This module:
* Makes company field mandatory on product
* Sets the default value to the company of the user session
* Hides the field to the user.
""",
'data' : ['product_autocompany_view.xml'],
'auto_install': False,
'installable': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
aschiweck/mutt-atlassian | refs/heads/master | mutt/atlassian/scripts/__init__.py | 1 | # -*- coding: utf-8 -*-
'''
@author: Andreas Schiweck
'''
|
harlequin/sickbeard | refs/heads/master | sickbeard/image_cache.py | 27 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import os.path
import sickbeard
from sickbeard import helpers, logger, exceptions
from sickbeard import encodingKludge as ek
from sickbeard.metadata.generic import GenericMetadata
from lib.hachoir_parser import createParser
from lib.hachoir_metadata import extractMetadata
class ImageCache:
    """Caches per-show poster/banner images under <CACHE_DIR>/images.

    Images are copied from the show directory when a usable one exists,
    otherwise downloaded from TVDB.  Cached files are named
    '<tvdb_id>.poster.jpg' / '<tvdb_id>.banner.jpg'.
    """

    def __init__(self):
        pass

    def _cache_dir(self):
        """
        Builds up the full path to the image cache directory
        """
        return ek.ek(os.path.abspath, ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images'))

    def poster_path(self, tvdb_id):
        """
        Builds up the path to a poster cache for a given tvdb id

        returns: a full path to the cached poster file for the given tvdb id

        tvdb_id: ID of the show to use in the file name
        """
        poster_file_name = str(tvdb_id) + '.poster.jpg'
        return ek.ek(os.path.join, self._cache_dir(), poster_file_name)

    def banner_path(self, tvdb_id):
        """
        Builds up the path to a banner cache for a given tvdb id

        returns: a full path to the cached banner file for the given tvdb id

        tvdb_id: ID of the show to use in the file name
        """
        banner_file_name = str(tvdb_id) + '.banner.jpg'
        return ek.ek(os.path.join, self._cache_dir(), banner_file_name)

    def has_poster(self, tvdb_id):
        """
        Returns true if a cached poster exists for the given tvdb id
        """
        poster_path = self.poster_path(tvdb_id)
        logger.log(u"Checking if file "+str(poster_path)+" exists", logger.DEBUG)
        return ek.ek(os.path.isfile, poster_path)

    def has_banner(self, tvdb_id):
        """
        Returns true if a cached banner exists for the given tvdb id
        """
        banner_path = self.banner_path(tvdb_id)
        logger.log(u"Checking if file "+str(banner_path)+" exists", logger.DEBUG)
        return ek.ek(os.path.isfile, banner_path)

    # image type identifiers returned by which_type()
    BANNER = 1
    POSTER = 2

    def which_type(self, path):
        """
        Analyzes the image provided and attempts to determine whether it is a poster or banner.

        returns: BANNER, POSTER if it concluded one or the other, or None if the image was neither (or didn't exist)

        path: full path to the image
        """
        if not ek.ek(os.path.isfile, path):
            logger.log(u"Couldn't check the type of "+str(path)+" cause it doesn't exist", logger.WARNING)
            return None

        # use hachoir to parse the image for us
        img_parser = createParser(path)
        img_metadata = extractMetadata(img_parser)

        if not img_metadata:
            logger.log(u"Unable to get metadata from "+str(path)+", not using your existing image", logger.DEBUG)
            return None

        img_ratio = float(img_metadata.get('width'))/float(img_metadata.get('height'))

        # NOTE(review): reaches into hachoir's private stream attribute to
        # close the underlying file handle -- confirm against the hachoir API.
        img_parser.stream._input.close()

        # most posters are around 0.68 width/height ratio (eg. 680/1000)
        if 0.55 < img_ratio < 0.8:
            return self.POSTER

        # most banners are around 5.4 width/height ratio (eg. 758/140)
        elif 5 < img_ratio < 6:
            return self.BANNER
        else:
            logger.log(u"Image has size ratio of "+str(img_ratio)+", unknown type", logger.WARNING)
            return None

    def _cache_image_from_file(self, image_path, img_type, tvdb_id):
        """
        Takes the image provided and copies it to the cache folder

        returns: bool representing success

        image_path: path to the image we're caching
        img_type: BANNER or POSTER
        tvdb_id: id of the show this image belongs to
        """
        # generate the path based on the type & tvdb_id
        if img_type == self.POSTER:
            dest_path = self.poster_path(tvdb_id)
        elif img_type == self.BANNER:
            dest_path = self.banner_path(tvdb_id)
        else:
            logger.log(u"Invalid cache image type: "+str(img_type), logger.ERROR)
            return False

        # make sure the cache folder exists before we try copying to it
        if not ek.ek(os.path.isdir, self._cache_dir()):
            logger.log(u"Image cache dir didn't exist, creating it at "+str(self._cache_dir()))
            ek.ek(os.makedirs, self._cache_dir())

        logger.log(u"Copying from "+image_path+" to "+dest_path)
        helpers.copyFile(image_path, dest_path)

        return True

    def _cache_image_from_tvdb(self, show_obj, img_type):
        """
        Retrieves an image of the type specified from TVDB and saves it to the cache folder

        returns: bool representing success

        show_obj: TVShow object that we want to cache an image for
        img_type: BANNER or POSTER
        """
        # generate the path based on the type & tvdb_id
        if img_type == self.POSTER:
            img_type_name = 'poster'
            dest_path = self.poster_path(show_obj.tvdbid)
        elif img_type == self.BANNER:
            img_type_name = 'banner'
            dest_path = self.banner_path(show_obj.tvdbid)
        else:
            logger.log(u"Invalid cache image type: "+str(img_type), logger.ERROR)
            return False

        # retrieve the image from TVDB using the generic metadata class
        #TODO: refactor
        metadata_generator = GenericMetadata()
        img_data = metadata_generator._retrieve_show_image(img_type_name, show_obj)
        result = metadata_generator._write_image(img_data, dest_path)

        return result

    def fill_cache(self, show_obj):
        """
        Caches all images for the given show. Copies them from the show dir if possible, or
        downloads them from TVDB if they aren't in the show dir.

        show_obj: TVShow object to cache images for
        """
        logger.log(u"Checking if we need any cache images for show "+str(show_obj.tvdbid), logger.DEBUG)

        # check if the images are already cached or not
        need_images = {self.POSTER: not self.has_poster(show_obj.tvdbid),
                       self.BANNER: not self.has_banner(show_obj.tvdbid),
                       }

        if not need_images[self.POSTER] and not need_images[self.BANNER]:
            logger.log(u"No new cache images needed, not retrieving new ones")
            return

        # check the show dir for images and use them
        try:
            for cur_provider in sickbeard.metadata_provider_dict.values():
                logger.log(u"Checking if we can use the show image from the "+cur_provider.name+" metadata", logger.DEBUG)
                if ek.ek(os.path.isfile, cur_provider.get_poster_path(show_obj)):
                    cur_file_name = os.path.abspath(cur_provider.get_poster_path(show_obj))
                    cur_file_type = self.which_type(cur_file_name)

                    # IDIOM FIX: compare to None with `is`, not `==`
                    if cur_file_type is None:
                        logger.log(u"Unable to retrieve image type, not using the image from "+str(cur_file_name), logger.WARNING)
                        continue

                    logger.log(u"Checking if image "+cur_file_name+" (type "+str(cur_file_type)+" needs metadata: "+str(need_images[cur_file_type]), logger.DEBUG)

                    if cur_file_type in need_images and need_images[cur_file_type]:
                        logger.log(u"Found an image in the show dir that doesn't exist in the cache, caching it: "+cur_file_name+", type "+str(cur_file_type), logger.DEBUG)
                        self._cache_image_from_file(cur_file_name, cur_file_type, show_obj.tvdbid)
                        need_images[cur_file_type] = False
        except exceptions.ShowDirNotFoundException:
            logger.log(u"Unable to search for images in show dir because it doesn't exist", logger.WARNING)

        # download from TVDB for missing ones
        for cur_image_type in [self.POSTER, self.BANNER]:
            logger.log(u"Seeing if we still need an image of type "+str(cur_image_type)+": "+str(need_images[cur_image_type]), logger.DEBUG)
            if cur_image_type in need_images and need_images[cur_image_type]:
                self._cache_image_from_tvdb(show_obj, cur_image_type)

        logger.log(u"Done cache check")
|
heke123/chromium-crosswalk | refs/heads/master | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py | 6 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import optparse
import signal
import traceback
from webkitpy.common.host import Host
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.port.factory import platform_options
# This mirrors what the shell normally does.
INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
# This is a randomly chosen exit code that can be tested against to
# indicate that an unexpected exception occurred.
EXCEPTIONAL_EXIT_STATUS = 254
_log = logging.getLogger(__name__)
def lint(host, options):
    """Parse every port's TestExpectations files in lint mode.

    Returns a list of "file: warning" failure strings (empty on success).
    Each expectations file is linted only once even when several ports
    share it.
    """
    ports_to_lint = [host.port_factory.get(name) for name in host.port_factory.all_port_names(options.platform)]
    files_linted = set()
    failures = []

    for port_to_lint in ports_to_lint:
        expectations_dict = port_to_lint.expectations_dict()

        for expectations_file in expectations_dict.keys():

            if expectations_file in files_linted:
                continue

            try:
                # is_lint_mode=True makes the parser raise on every warning
                # instead of silently tolerating them.
                test_expectations.TestExpectations(port_to_lint,
                    expectations_dict={expectations_file: expectations_dict[expectations_file]},
                    is_lint_mode=True)
            except test_expectations.ParseError as e:
                _log.error('')
                for warning in e.warnings:
                    _log.error(warning)
                    failures.append('%s: %s' % (expectations_file, warning))
                _log.error('')
            files_linted.add(expectations_file)
    return failures
def check_virtual_test_suites(host, options):
    """Verify every virtual test suite directory contains a README.txt.

    Returns a list of human-readable failure strings (empty on success).
    """
    port = host.port_factory.get(options=options)
    fs = host.filesystem
    layout_tests_dir = port.layout_tests_dir()
    virtual_suites = port.virtual_test_suites()

    failures = []
    for suite in virtual_suites:
        # suite.name is a '/'-separated path relative to LayoutTests.
        comps = [layout_tests_dir] + suite.name.split('/') + ['README.txt']
        path_to_readme = fs.join(*comps)
        if not fs.exists(path_to_readme):
            failure = 'LayoutTests/%s/README.txt is missing (each virtual suite must have one).' % suite.name
            _log.error(failure)
            failures.append(failure)
    if failures:
        _log.error('')
    return failures
def set_up_logging(logging_stream):
    """Attach a StreamHandler for *logging_stream* to the root logger.

    Returns the (logger, handler) pair so callers can detach it later
    via tear_down_logging().
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler(logging_stream)
    root_logger.addHandler(stream_handler)
    return root_logger, stream_handler
def tear_down_logging(logger, handler):
    """Detach *handler* (previously attached by set_up_logging) from *logger*."""
    logger.removeHandler(handler)
def run_checks(host, options, logging_stream):
    """Run all lint checks and return a process exit status.

    Logs to *logging_stream*, optionally dumps the failure list as JSON to
    options.json, and returns 0 on success or 1 when any failure was found.
    """
    logger, handler = set_up_logging(logging_stream)
    try:
        failures = []
        failures.extend(lint(host, options))
        failures.extend(check_virtual_test_suites(host, options))

        if options.json:
            with open(options.json, 'w') as f:
                json.dump(failures, f)

        if failures:
            _log.error('Lint failed.')
            return 1
        else:
            _log.info('Lint succeeded.')
            return 0
    finally:
        # CONSISTENCY FIX: use the dedicated helper instead of duplicating
        # its body (the original called logger.removeHandler directly,
        # leaving tear_down_logging unused).
        tear_down_logging(logger, handler)
def main(argv, _, stderr):
    """Command-line entry point; returns a process exit status.

    A 'test' platform switches to MockHost so the lint can be exercised
    without a real checkout.
    """
    parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    options, _ = parser.parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    try:
        exit_status = run_checks(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = INTERRUPTED_EXIT_STATUS
    except Exception as e:
        # Python 2 print-to-file syntax; this codebase targets Python 2.
        print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
        traceback.print_exc(file=stderr)
        exit_status = EXCEPTIONAL_EXIT_STATUS

    return exit_status
|
djphan/c410-Repo | refs/heads/master | c410-Lab4_5_Flask/env-lab4/lib/python2.7/site-packages/flask/app.py | 427 | # -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock
from datetime import timedelta
from itertools import chain
from functools import update_wrapper
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
from werkzeug.exceptions import HTTPException, InternalServerError, \
MethodNotAllowed, BadRequest
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
locked_cached_property, _endpoint_from_view_func, find_package
from . import json
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext, AppContext, _AppCtxGlobals
from .globals import _request_ctx_stack, request, session, g
from .sessions import SecureCookieSessionInterface
from .module import blueprint_is_module
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
request_tearing_down, appcontext_tearing_down
from ._compat import reraise, string_types, text_type, integer_types
# a lock used for logger initialization
_logger_lock = Lock()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
def setupmethod(f):
    """Wraps a method so that it performs a check in debug mode if the
    first request was already handled.
    """
    _message = ('A setup function was called after the '
                'first request was handled. This usually indicates a bug '
                'in the application where a module was not imported '
                'and decorators or other functionality was called too late.\n'
                'To fix this make sure to import all your view modules, '
                'database models and everything related at a central place '
                'before the application starts serving requests.')

    def wrapper_func(self, *args, **kwargs):
        if self.debug and self._got_first_request:
            raise AssertionError(_message)
        return f(self, *args, **kwargs)
    return update_wrapper(wrapper_func, f)
class Flask(_PackageBoundObject):
    """The flask object implements a WSGI application and acts as the central
    object. It is passed the name of the module or package of the
    application. Once it is created it will act as a central registry for
    the view functions, the URL rules, template configuration and much more.
    The name of the package is used to resolve resources from inside the
    package or the folder the module is contained in depending on if the
    package parameter resolves to an actual python package (a folder with
    an `__init__.py` file inside) or a standard module (just a `.py` file).
    For more information about resource loading, see :func:`open_resource`.
    Usually you create a :class:`Flask` instance in your main module or
    in the `__init__.py` file of your package like this::
        from flask import Flask
        app = Flask(__name__)
    .. admonition:: About the First Parameter
        The idea of the first parameter is to give Flask an idea what
        belongs to your application. This name is used to find resources
        on the file system, can be used by extensions to improve debugging
        information and a lot more.
        So it's important what you provide there. If you are using a single
        module, `__name__` is always the correct value. If you however are
        using a package, it's usually recommended to hardcode the name of
        your package there.
        For example if your application is defined in `yourapplication/app.py`
        you should create it with one of the two versions below::
            app = Flask('yourapplication')
            app = Flask(__name__.split('.')[0])
        Why is that? The application will work even with `__name__`, thanks
        to how resources are looked up. However it will make debugging more
        painful. Certain extensions can make assumptions based on the
        import name of your application. For example the Flask-SQLAlchemy
        extension will look for the code in your application that triggered
        an SQL query in debug mode. If the import name is not properly set
        up, that debugging information is lost. (For example it would only
        pick up SQL queries in `yourapplication.app` and not
        `yourapplication.views.frontend`)
    .. versionadded:: 0.7
        The `static_url_path`, `static_folder`, and `template_folder`
        parameters were added.
    .. versionadded:: 0.8
        The `instance_path` and `instance_relative_config` parameters were
        added.
    :param import_name: the name of the application package
    :param static_url_path: can be used to specify a different path for the
                            static files on the web. Defaults to the name
                            of the `static_folder` folder.
    :param static_folder: the folder with static files that should be served
                          at `static_url_path`. Defaults to the ``'static'``
                          folder in the root path of the application.
    :param template_folder: the folder that contains the templates that should
                            be used by the application. Defaults to
                            ``'templates'`` folder in the root path of the
                            application.
    :param instance_path: An alternative instance path for the application.
                          By default the folder ``'instance'`` next to the
                          package or module is assumed to be the instance
                          path.
    :param instance_relative_config: if set to `True` relative filenames
                                     for loading the config are assumed to
                                     be relative to the instance path instead
                                     of the application root.
    """
    #: The class that is used for request objects. See :class:`~flask.Request`
    #: for more information.
    request_class = Request
    #: The class that is used for response objects. See
    #: :class:`~flask.Response` for more information.
    response_class = Response
    #: The class that is used for the :data:`~flask.g` instance.
    #:
    #: Example use cases for a custom class:
    #:
    #: 1. Store arbitrary attributes on flask.g.
    #: 2. Add a property for lazy per-request database connectors.
    #: 3. Return None instead of AttributeError on expected attributes.
    #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
    #:
    #: In Flask 0.9 this property was called `request_globals_class` but it
    #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
    #: flask.g object is not application context scoped.
    #:
    #: .. versionadded:: 0.10
    app_ctx_globals_class = _AppCtxGlobals
    # Backwards compatibility support: ``request_globals_class`` is the
    # Flask 0.9 name for :attr:`app_ctx_globals_class`.  Reading delegates
    # silently; writing emits a DeprecationWarning before delegating.
    def _get_request_globals_class(self):
        return self.app_ctx_globals_class
    def _set_request_globals_class(self, value):
        from warnings import warn
        warn(DeprecationWarning('request_globals_class attribute is now '
                                'called app_ctx_globals_class'))
        self.app_ctx_globals_class = value
    request_globals_class = property(_get_request_globals_class,
                                     _set_request_globals_class)
    # Remove the accessor helpers from the class namespace; only the
    # property object defined above remains reachable.
    del _get_request_globals_class, _set_request_globals_class
    #: The debug flag. Set this to `True` to enable debugging of the
    #: application. In debug mode the debugger will kick in when an unhandled
    #: exception occurs and the integrated server will automatically reload
    #: the application if changes in the code are detected.
    #:
    #: This attribute can also be configured from the config with the `DEBUG`
    #: configuration key. Defaults to `False`.
    debug = ConfigAttribute('DEBUG')
    #: The testing flag. Set this to `True` to enable the test mode of
    #: Flask extensions (and in the future probably also Flask itself).
    #: For example this might activate unittest helpers that have an
    #: additional runtime cost which should not be enabled by default.
    #:
    #: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
    #: default it's implicitly enabled.
    #:
    #: This attribute can also be configured from the config with the
    #: `TESTING` configuration key. Defaults to `False`.
    testing = ConfigAttribute('TESTING')
    #: If a secret key is set, cryptographic components can use this to
    #: sign cookies and other things. Set this to a complex random value
    #: when you want to use the secure cookie for instance.
    #:
    #: This attribute can also be configured from the config with the
    #: `SECRET_KEY` configuration key. Defaults to `None`.
    secret_key = ConfigAttribute('SECRET_KEY')
    #: The secure cookie uses this for the name of the session cookie.
    #:
    #: This attribute can also be configured from the config with the
    #: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'``
    session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
    #: A :class:`~datetime.timedelta` which is used to set the expiration
    #: date of a permanent session. The default is 31 days which makes a
    #: permanent session survive for roughly one month.
    #:
    #: This attribute can also be configured from the config with the
    #: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to
    #: ``timedelta(days=31)``
    permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
        get_converter=_make_timedelta)
    #: Enable this if you want to use the X-Sendfile feature. Keep in
    #: mind that the server has to support this. This only affects files
    #: sent with the :func:`send_file` method.
    #:
    #: .. versionadded:: 0.2
    #:
    #: This attribute can also be configured from the config with the
    #: `USE_X_SENDFILE` configuration key. Defaults to `False`.
    use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
    #: The name of the logger to use. By default the logger name is the
    #: package name passed to the constructor.
    #:
    #: .. versionadded:: 0.4
    logger_name = ConfigAttribute('LOGGER_NAME')
    #: Enable the deprecated module support? This is active by default
    #: in 0.7 but will be changed to False in 0.8. With Flask 1.0 modules
    #: will be removed in favor of Blueprints
    #: (checked by :meth:`register_module` before registering).
    enable_modules = True
    #: The logging format used for the debug logger. This is only used when
    #: the application is in debug mode, otherwise the attached logging
    #: handler does the formatting.
    #:
    #: .. versionadded:: 0.3
    debug_log_format = (
        '-' * 80 + '\n' +
        '%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
        '%(message)s\n' +
        '-' * 80
    )
    #: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
    #:
    #: .. versionadded:: 0.10
    json_encoder = json.JSONEncoder
    #: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
    #:
    #: .. versionadded:: 0.10
    json_decoder = json.JSONDecoder
    #: Options that are passed directly to the Jinja2 environment.
    jinja_options = ImmutableDict(
        extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
    )
    #: Default configuration parameters.  Copied into :attr:`config` by
    #: :meth:`make_config` during construction.
    default_config = ImmutableDict({
        'DEBUG': False,
        'TESTING': False,
        'PROPAGATE_EXCEPTIONS': None,
        'PRESERVE_CONTEXT_ON_EXCEPTION': None,
        'SECRET_KEY': None,
        'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
        'USE_X_SENDFILE': False,
        'LOGGER_NAME': None,
        'SERVER_NAME': None,
        'APPLICATION_ROOT': None,
        'SESSION_COOKIE_NAME': 'session',
        'SESSION_COOKIE_DOMAIN': None,
        'SESSION_COOKIE_PATH': None,
        'SESSION_COOKIE_HTTPONLY': True,
        'SESSION_COOKIE_SECURE': False,
        'MAX_CONTENT_LENGTH': None,
        'SEND_FILE_MAX_AGE_DEFAULT': 12 * 60 * 60, # 12 hours
        'TRAP_BAD_REQUEST_ERRORS': False,
        'TRAP_HTTP_EXCEPTIONS': False,
        'PREFERRED_URL_SCHEME': 'http',
        'JSON_AS_ASCII': True,
        'JSON_SORT_KEYS': True,
        'JSONIFY_PRETTYPRINT_REGULAR': True,
    })
    #: The rule object to use for URL rules created. This is used by
    #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
    #:
    #: .. versionadded:: 0.7
    url_rule_class = Rule
    #: the test client that is used with when `test_client` is used.
    #:
    #: .. versionadded:: 0.7
    test_client_class = None
    #: the session interface to use. By default an instance of
    #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
    #:
    #: .. versionadded:: 0.8
    session_interface = SecureCookieSessionInterface()
    def __init__(self, import_name, static_path=None, static_url_path=None,
                 static_folder='static', template_folder='templates',
                 instance_path=None, instance_relative_config=False):
        """Initialize the application object.

        See the class docstring for the meaning of all parameters.  This
        sets up the per-application registries (view functions, error
        handlers, request/teardown hooks, blueprints, the URL map) and
        registers the static file route when a static folder is set.
        """
        _PackageBoundObject.__init__(self, import_name,
                                     template_folder=template_folder)
        # `static_path` is the pre-0.7 name of `static_url_path`.
        if static_path is not None:
            from warnings import warn
            warn(DeprecationWarning('static_path is now called '
                                    'static_url_path'), stacklevel=2)
            static_url_path = static_path
        if static_url_path is not None:
            self.static_url_path = static_url_path
        if static_folder is not None:
            self.static_folder = static_folder
        if instance_path is None:
            instance_path = self.auto_find_instance_path()
        elif not os.path.isabs(instance_path):
            raise ValueError('If an instance path is provided it must be '
                             'absolute. A relative path was given instead.')
        #: Holds the path to the instance folder.
        #:
        #: .. versionadded:: 0.8
        self.instance_path = instance_path
        #: The configuration dictionary as :class:`Config`. This behaves
        #: exactly like a regular dictionary but supports additional methods
        #: to load a config from files.
        self.config = self.make_config(instance_relative_config)
        # Prepare the deferred setup of the logger (created lazily by the
        # `logger` property).
        self._logger = None
        self.logger_name = self.import_name
        #: A dictionary of all view functions registered. The keys will
        #: be function names which are also used to generate URLs and
        #: the values are the function objects themselves.
        #: To register a view function, use the :meth:`route` decorator.
        self.view_functions = {}
        # support for the now deprecated `error_handlers` attribute. The
        # :attr:`error_handler_spec` shall be used now.
        self._error_handlers = {}
        #: A dictionary of all registered error handlers. The key is `None`
        #: for error handlers active on the application, otherwise the key is
        #: the name of the blueprint. Each key points to another dictionary
        #: where they key is the status code of the http exception. The
        #: special key `None` points to a list of tuples where the first item
        #: is the class for the instance check and the second the error handler
        #: function.
        #:
        #: To register a error handler, use the :meth:`errorhandler`
        #: decorator.
        self.error_handler_spec = {None: self._error_handlers}
        #: A list of functions that are called when :meth:`url_for` raises a
        #: :exc:`~werkzeug.routing.BuildError`. Each function registered here
        #: is called with `error`, `endpoint` and `values`. If a function
        #: returns `None` or raises a `BuildError` the next function is
        #: tried.
        #:
        #: .. versionadded:: 0.9
        self.url_build_error_handlers = []
        #: A dictionary with lists of functions that should be called at the
        #: beginning of the request. The key of the dictionary is the name of
        #: the blueprint this function is active for, `None` for all requests.
        #: This can for example be used to open database connections or
        #: getting hold of the currently logged in user. To register a
        #: function here, use the :meth:`before_request` decorator.
        self.before_request_funcs = {}
        #: A lists of functions that should be called at the beginning of the
        #: first request to this instance. To register a function here, use
        #: the :meth:`before_first_request` decorator.
        #:
        #: .. versionadded:: 0.8
        self.before_first_request_funcs = []
        #: A dictionary with lists of functions that should be called after
        #: each request. The key of the dictionary is the name of the blueprint
        #: this function is active for, `None` for all requests. This can for
        #: example be used to open database connections or getting hold of the
        #: currently logged in user. To register a function here, use the
        #: :meth:`after_request` decorator.
        self.after_request_funcs = {}
        #: A dictionary with lists of functions that are called after
        #: each request, even if an exception has occurred. The key of the
        #: dictionary is the name of the blueprint this function is active for,
        #: `None` for all requests. These functions are not allowed to modify
        #: the request, and their return values are ignored. If an exception
        #: occurred while processing the request, it gets passed to each
        #: teardown_request function. To register a function here, use the
        #: :meth:`teardown_request` decorator.
        #:
        #: .. versionadded:: 0.7
        self.teardown_request_funcs = {}
        #: A list of functions that are called when the application context
        #: is destroyed. Since the application context is also torn down
        #: if the request ends this is the place to store code that disconnects
        #: from databases.
        #:
        #: .. versionadded:: 0.9
        self.teardown_appcontext_funcs = []
        #: A dictionary with lists of functions that can be used as URL
        #: value processor functions. Whenever a URL is built these functions
        #: are called to modify the dictionary of values in place. The key
        #: `None` here is used for application wide
        #: callbacks, otherwise the key is the name of the blueprint.
        #: Each of these functions has the chance to modify the dictionary
        #:
        #: .. versionadded:: 0.7
        self.url_value_preprocessors = {}
        #: A dictionary with lists of functions that can be used as URL value
        #: preprocessors. The key `None` here is used for application wide
        #: callbacks, otherwise the key is the name of the blueprint.
        #: Each of these functions has the chance to modify the dictionary
        #: of URL values before they are used as the keyword arguments of the
        #: view function. For each function registered this one should also
        #: provide a :meth:`url_defaults` function that adds the parameters
        #: automatically again that were removed that way.
        #:
        #: .. versionadded:: 0.7
        self.url_default_functions = {}
        #: A dictionary with list of functions that are called without argument
        #: to populate the template context. The key of the dictionary is the
        #: name of the blueprint this function is active for, `None` for all
        #: requests. Each returns a dictionary that the template context is
        #: updated with. To register a function here, use the
        #: :meth:`context_processor` decorator.
        self.template_context_processors = {
            None: [_default_template_ctx_processor]
        }
        #: all the attached blueprints in a dictionary by name. Blueprints
        #: can be attached multiple times so this dictionary does not tell
        #: you how often they got attached.
        #:
        #: .. versionadded:: 0.7
        self.blueprints = {}
        #: a place where extensions can store application specific state. For
        #: example this is where an extension could store database engines and
        #: similar things. For backwards compatibility extensions should register
        #: themselves like this::
        #:
        #:      if not hasattr(app, 'extensions'):
        #:          app.extensions = {}
        #:      app.extensions['extensionname'] = SomeObject()
        #:
        #: The key must match the name of the `flaskext` module. For example in
        #: case of a "Flask-Foo" extension in `flaskext.foo`, the key would be
        #: ``'foo'``.
        #:
        #: .. versionadded:: 0.7
        self.extensions = {}
        #: The :class:`~werkzeug.routing.Map` for this instance. You can use
        #: this to change the routing converters after the class was created
        #: but before any routes are connected. Example::
        #:
        #:    from werkzeug.routing import BaseConverter
        #:
        #:    class ListConverter(BaseConverter):
        #:        def to_python(self, value):
        #:            return value.split(',')
        #:        def to_url(self, values):
        #:            return ','.join(BaseConverter.to_url(value)
        #:                            for value in values)
        #:
        #:    app = Flask(__name__)
        #:    app.url_map.converters['list'] = ListConverter
        self.url_map = Map()
        # tracks internally if the application already handled at least one
        # request.
        self._got_first_request = False
        self._before_request_lock = Lock()
        # register the static folder for the application. Do that even
        # if the folder does not exist. First of all it might be created
        # while the server is running (usually happens during development)
        # but also because google appengine stores static files somewhere
        # else when mapped with the .yml file.
        if self.has_static_folder:
            self.add_url_rule(self.static_url_path + '/<path:filename>',
                              endpoint='static',
                              view_func=self.send_static_file)
    # Deprecated ``error_handlers`` attribute; ``error_handler_spec`` is the
    # supported replacement.  Reading emits a DeprecationWarning; writing
    # also mirrors the value into ``error_handler_spec[None]``.
    def _get_error_handlers(self):
        from warnings import warn
        warn(DeprecationWarning('error_handlers is deprecated, use the '
            'new error_handler_spec attribute instead.'), stacklevel=1)
        return self._error_handlers
    def _set_error_handlers(self, value):
        self._error_handlers = value
        self.error_handler_spec[None] = value
    error_handlers = property(_get_error_handlers, _set_error_handlers)
    # Remove the accessor helpers from the class namespace; only the
    # property object remains.
    del _get_error_handlers, _set_error_handlers
@locked_cached_property
def name(self):
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
import name is main. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
.. versionadded:: 0.8
"""
if self.import_name == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@property
def propagate_exceptions(self):
"""Returns the value of the `PROPAGATE_EXCEPTIONS` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
"""
rv = self.config['PROPAGATE_EXCEPTIONS']
if rv is not None:
return rv
return self.testing or self.debug
@property
def preserve_context_on_exception(self):
"""Returns the value of the `PRESERVE_CONTEXT_ON_EXCEPTION`
configuration value in case it's set, otherwise a sensible default
is returned.
.. versionadded:: 0.7
"""
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
    @property
    def logger(self):
        """A :class:`logging.Logger` object for this application. The
        default configuration is to log to stderr if the application is
        in debug mode. This logger can be used to (surprise) log messages.
        Here some examples::
            app.logger.debug('A value for debugging')
            app.logger.warning('A warning occurred (%d apples)', 42)
            app.logger.error('An error occurred')
        .. versionadded:: 0.3
        """
        # fast path without the lock: logger already created for the
        # current logger_name (a changed name forces re-creation).
        if self._logger and self._logger.name == self.logger_name:
            return self._logger
        # double-checked locking: re-test under the lock so that only one
        # thread creates the logger.
        with _logger_lock:
            if self._logger and self._logger.name == self.logger_name:
                return self._logger
            from flask.logging import create_logger
            self._logger = rv = create_logger(self)
            return rv
@locked_cached_property
def jinja_env(self):
"""The Jinja2 environment used to load templates."""
return self.create_jinja_environment()
    @property
    def got_first_request(self):
        """This attribute is set to `True` if the application started
        handling the first request.

        Read-only view of the internal ``_got_first_request`` flag.

        .. versionadded:: 0.8
        """
        return self._got_first_request
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return Config(root_path, self.default_config)
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def open_instance_resource(self, resource, mode='rb'):
"""Opens a resource from the application's instance folder
(:attr:`instance_path`). Otherwise works like
:meth:`open_resource`. Instance resources can also be opened for
writing.
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
return open(os.path.join(self.instance_path, resource), mode)
def create_jinja_environment(self):
"""Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
the Jinja2 globals and filters after initialization. Override
this function to customize the behavior.
.. versionadded:: 0.5
"""
options = dict(self.jinja_options)
if 'autoescape' not in options:
options['autoescape'] = self.select_jinja_autoescape
rv = Environment(self, **options)
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g
)
rv.filters['tojson'] = json.tojson_filter
return rv
def create_global_jinja_loader(self):
"""Creates the loader for the Jinja2 environment. Can be used to
override just the loader and keeping the rest unchanged. It's
discouraged to override this function. Instead one should override
the :meth:`jinja_loader` function instead.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
    def init_jinja_globals(self):
        """Deprecated. Used to initialize the Jinja2 globals.
        .. versionadded:: 0.5
        .. versionchanged:: 0.7
            This method is deprecated with 0.7. Override
            :meth:`create_jinja_environment` instead.
        """
        # intentionally empty -- deprecated hook, see versionchanged note
def select_jinja_autoescape(self, filename):
"""Returns `True` if autoescaping should be active for the given
template name.
.. versionadded:: 0.5
"""
if filename is None:
return False
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
    def update_template_context(self, context):
        """Update the template context with some commonly used variables.
        This injects request, session, config and g into the template
        context as well as everything template context processors want
        to inject. Note that the as of Flask 0.6, the original values
        in the context will not be overridden if a context processor
        decides to return a value with the same key.
        :param context: the context as a dictionary that is updated in place
                        to add extra variables.
        """
        # application-wide processors always run; blueprint-local ones are
        # chained on when the current request belongs to a blueprint.
        funcs = self.template_context_processors[None]
        reqctx = _request_ctx_stack.top
        if reqctx is not None:
            bp = reqctx.request.blueprint
            if bp is not None and bp in self.template_context_processors:
                funcs = chain(funcs, self.template_context_processors[bp])
        orig_ctx = context.copy()
        for func in funcs:
            context.update(func())
        # make sure the original values win. This makes it possible to
        # easier add new variables in context processors without breaking
        # existing views.
        context.update(orig_ctx)
    def run(self, host=None, port=None, debug=None, **options):
        """Runs the application on a local development server. If the
        :attr:`debug` flag is set the server will automatically reload
        for code changes and show a debugger in case an exception happened.
        If you want to run the application in debug mode, but disable the
        code execution on the interactive debugger, you can pass
        ``use_evalex=False`` as parameter. This will keep the debugger's
        traceback screen active, but disable code execution.
        .. admonition:: Keep in Mind
           Flask will suppress any server error with a generic error page
           unless it is in debug mode. As such to enable just the
           interactive debugger without the code reloading, you have to
           invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
           Setting ``use_debugger`` to `True` without being in debug mode
           won't catch any exceptions because there won't be any to
           catch.
        .. versionchanged:: 0.10
           The default port is now picked from the ``SERVER_NAME`` variable.
        :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
                     have the server available externally as well. Defaults to
                     ``'127.0.0.1'``.
        :param port: the port of the webserver. Defaults to ``5000`` or the
                     port defined in the ``SERVER_NAME`` config variable if
                     present.
        :param debug: if given, enable or disable debug mode.
                      See :attr:`debug`.
        :param options: the options to be forwarded to the underlying
                        Werkzeug server. See
                        :func:`werkzeug.serving.run_simple` for more
                        information.
        """
        from werkzeug.serving import run_simple
        if host is None:
            host = '127.0.0.1'
        if port is None:
            # fall back to the port embedded in SERVER_NAME ("host:port")
            server_name = self.config['SERVER_NAME']
            if server_name and ':' in server_name:
                port = int(server_name.rsplit(':', 1)[1])
            else:
                port = 5000
        if debug is not None:
            self.debug = bool(debug)
        # reloader and debugger default to the application's debug flag but
        # can each be overridden through **options
        options.setdefault('use_reloader', self.debug)
        options.setdefault('use_debugger', self.debug)
        try:
            run_simple(host, port, self, **options)
        finally:
            # reset the first request information if the development server
            # resetted normally. This makes it possible to restart the server
            # without reloader and that stuff from an interactive shell.
            self._got_first_request = False
def test_client(self, use_cookies=True):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a `with` block to defer the closing down
of the context until the end of the `with` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for `with` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
return cls(self, self.response_class, use_cookies=use_cookies)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set. Instead of overriding this method
we recommend replacing the :class:`session_interface`.
:param request: an instance of :attr:`request_class`.
"""
return self.session_interface.open_session(self, request)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
return self.session_interface.save_session(self, session, response)
def make_null_session(self):
"""Creates a new instance of a missing session. Instead of overriding
this method we recommend replacing the :class:`session_interface`.
.. versionadded:: 0.7
"""
return self.session_interface.make_null_session(self)
    def register_module(self, module, **options):
        """Registers a module with this application. The keyword argument
        of this function are the same as the ones for the constructor of the
        :class:`Module` class and will override the values of the module if
        provided.
        .. versionchanged:: 0.7
            The module system was deprecated in favor for the blueprint
            system.
        """
        assert blueprint_is_module(module), 'register_module requires ' \
            'actual module objects. Please upgrade to blueprints though.'
        if not self.enable_modules:
            raise RuntimeError('Module support was disabled but code '
                'attempted to register a module named %r' % module)
        else:
            from warnings import warn
            warn(DeprecationWarning('Modules are deprecated. Upgrade to '
                'using blueprints. Have a look into the documentation for '
                'more information. If this module was registered by a '
                'Flask-Extension upgrade the extension or contact the author '
                'of that extension instead. (Registered %r)' % module),
                 stacklevel=2)
        # modules are implemented on top of blueprints since 0.7, so after
        # the deprecation warning this simply forwards to register_blueprint
        self.register_blueprint(module, **options)
    @setupmethod
    def register_blueprint(self, blueprint, **options):
        """Registers a blueprint on the application.
        .. versionadded:: 0.7
        """
        # A blueprint may be registered more than once; only the first
        # registration records it in self.blueprints, and the flag tells
        # the blueprint's register() which case it is handling.
        first_registration = False
        if blueprint.name in self.blueprints:
            # re-registration under the same name must be the same object
            assert self.blueprints[blueprint.name] is blueprint, \
                'A blueprint\'s name collision occurred between %r and ' \
                '%r. Both share the same name "%s". Blueprints that ' \
                'are created on the fly need unique names.' % \
                (blueprint, self.blueprints[blueprint.name], blueprint.name)
        else:
            self.blueprints[blueprint.name] = blueprint
            first_registration = True
        blueprint.register(self, options, first_registration)
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
decorator. If a view_func is provided it will be registered with the
endpoint.
Basically this example::
@app.route('/')
def index():
pass
Is equivalent to the following::
def index():
pass
app.add_url_rule('/', 'index', index)
If the view_func is not provided you will need to connect the endpoint
to a view function like so::
app.view_functions['index'] = index
Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
to customize the behavior via subclassing you only need to change
this method.
For more information refer to :ref:`url-route-registrations`.
.. versionchanged:: 0.2
`view_func` parameter added.
.. versionchanged:: 0.6
`OPTIONS` is added automatically as method.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param view_func: the function to call when serving a request to the
provided endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
options['endpoint'] = endpoint
methods = options.pop('methods', None)
# if the methods are not given and the view_func object knows its
# methods we can use that instead. If neither exists, we go with
# a tuple of only `GET` as default.
if methods is None:
methods = getattr(view_func, 'methods', None) or ('GET',)
methods = set(methods)
# Methods that should always be added
required_methods = set(getattr(view_func, 'required_methods', ()))
# starting with Flask 0.8 the view_func object can disable and
# force-enable the automatic options handling.
provide_automatic_options = getattr(view_func,
'provide_automatic_options', None)
if provide_automatic_options is None:
if 'OPTIONS' not in methods:
provide_automatic_options = True
required_methods.add('OPTIONS')
else:
provide_automatic_options = False
# Add the required methods now.
methods |= required_methods
# due to a werkzeug bug we need to make sure that the defaults are
# None if they are an empty dictionary. This should not be necessary
# with Werkzeug 0.7
options['defaults'] = options.get('defaults') or None
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not None:
old_func = self.view_functions.get(endpoint)
if old_func is not None and old_func != view_func:
raise AssertionError('View function mapping is overwriting an '
'existing endpoint function: %s' % endpoint)
self.view_functions[endpoint] = view_func
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
@app.route('/')
def index():
return 'Hello World'
For more information refer to :ref:`url-route-registrations`.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
@setupmethod
def endpoint(self, endpoint):
"""A decorator to register a function as an endpoint.
Example::
@app.endpoint('example.endpoint')
def example():
return "example"
:param endpoint: the name of the endpoint
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@setupmethod
def errorhandler(self, code_or_exception):
"""A decorator that is used to register a function give a given
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
You can also register a function as error handler without using
the :meth:`errorhandler` decorator. The following example is
equivalent to the one above::
def page_not_found(error):
return 'This page does not exist', 404
app.error_handler_spec[None][404] = page_not_found
Setting error handlers via assignments to :attr:`error_handler_spec`
however is discouraged as it requires fiddling with nested dictionaries
and the special case for arbitrary exception types.
The first `None` refers to the active blueprint. If the error
handler should be application wide `None` shall be used.
.. versionadded:: 0.7
One can now additionally also register custom exception types
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
:param code: the code as integer for the handler
"""
def decorator(f):
self._register_error_handler(None, code_or_exception, f)
return f
return decorator
def register_error_handler(self, code_or_exception, f):
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non decorator
usage.
.. versionadded:: 0.7
"""
self._register_error_handler(None, code_or_exception, f)
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
if isinstance(code_or_exception, HTTPException):
code_or_exception = code_or_exception.code
if isinstance(code_or_exception, integer_types):
assert code_or_exception != 500 or key is None, \
'It is currently not possible to register a 500 internal ' \
'server error on a per-blueprint level.'
self.error_handler_spec.setdefault(key, {})[code_or_exception] = f
else:
self.error_handler_spec.setdefault(key, {}).setdefault(None, []) \
.append((code_or_exception, f))
@setupmethod
def template_filter(self, name=None):
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(self, f, name=None):
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
@setupmethod
def template_test(self, name=None):
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_template_test(self, f, name=None):
"""Register a custom template test. Works exactly like the
:meth:`template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
self.jinja_env.tests[name or f.__name__] = f
@setupmethod
def template_global(self, name=None):
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_template_global(self, f, name=None):
"""Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request."""
self.before_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
@setupmethod
def after_request(self, f):
"""Register a function to be run after each request. Your function
must take one parameter, a :attr:`response_class` object and return
a new response object or the same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_request(self, f):
"""Register a function to be run at the end of each request,
regardless of whether there was an exception or not. These functions
are executed when the request context is popped, even if not an
actual request was performed.
Example::
ctx = app.test_request_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the request context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Generally teardown functions must take every necessary step to avoid
that they will fail. If they do execute code that might fail they
will have to surround the execution of these code by try/except
statements and log occurring errors.
When a teardown function was called because of a exception it will
be passed an error object.
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
immediately. Instead if will keep it alive so that the interactive
debugger can still access it. This behavior can be controlled
by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
"""
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_appcontext(self, f):
"""Registers a function to be called when the application context
ends. These functions are typically also called when the request
context is popped.
Example::
ctx = app.app_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the app context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Since a request context typically also manages an application
context it would also be called when you pop a request context.
When a teardown function was called because of an exception it will
be passed an error object.
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
return f
@setupmethod
def context_processor(self, f):
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
@setupmethod
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for all view
functions of the application. It's called before the view functions
are called and can modify the url values provided.
"""
self.url_value_preprocessors.setdefault(None, []).append(f)
return f
@setupmethod
def url_defaults(self, f):
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions.setdefault(None, []).append(f)
return f
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionadded:: 0.3
"""
handlers = self.error_handler_spec.get(request.blueprint)
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
if handlers and e.code in handlers:
handler = handlers[e.code]
else:
handler = self.error_handler_spec[None].get(e.code)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
this will return `False` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It
also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`.
This is called for all HTTP exceptions raised by a view function.
If it returns `True` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
def handle_user_exception(self, e):
"""This method is called whenever an exception occurs that should be
handled. A special case are
:class:`~werkzeug.exception.HTTPException`\s which are forwarded by
this function to the :meth:`handle_http_exception` method. This
function will either return a response value or reraise the
exception with the same traceback.
.. versionadded:: 0.7
"""
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
# ensure not to trash sys.exc_info() at that point in case someone
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
blueprint_handlers = ()
handlers = self.error_handler_spec.get(request.blueprint)
if handlers is not None:
blueprint_handlers = handlers.get(None, ())
app_handlers = self.error_handler_spec[None].get(None, ())
for typecheck, handler in chain(blueprint_handlers, app_handlers):
if isinstance(e, typecheck):
return handler(e)
reraise(exc_type, exc_value, tb)
def handle_exception(self, e):
"""Default exception handling that kicks in when an exception
occurs that is not caught. In debug mode the exception will
be re-raised immediately, otherwise it is logged and the handler
for a 500 internal server error is used. If no such handler
exists, a default 500 internal server error message is displayed.
.. versionadded:: 0.3
"""
exc_type, exc_value, tb = sys.exc_info()
got_request_exception.send(self, exception=e)
handler = self.error_handler_spec[None].get(500)
if self.propagate_exceptions:
# if we want to repropagate the exception, we can attempt to
# raise it with the whole traceback in case we can do that
# (the function was actually called from the except part)
# otherwise, we just raise the error again
if exc_value is e:
reraise(exc_type, exc_value, tb)
else:
raise e
self.log_exception((exc_type, exc_value, tb))
if handler is None:
return InternalServerError()
return handler(e)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % (
request.path,
request.method
), exc_info=exc_info)
def raise_routing_exception(self, request):
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args)
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
response = self.make_response(rv)
response = self.process_response(response)
request_finished.send(self, response=response)
return response
def try_trigger_before_first_request_functions(self):
"""Called before each request and will ensure that it triggers
the :attr:`before_first_request_funcs` and only exactly once per
application instance (which means process usually).
:internal:
"""
if self._got_first_request:
return
with self._before_request_lock:
if self._got_first_request:
return
self._got_first_request = True
for func in self.before_first_request_funcs:
func()
def make_default_options_response(self):
"""This method is called to create the default `OPTIONS` response.
This can be changed through subclassing to change the default
behavior of `OPTIONS` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e:
methods = e.valid_methods
except HTTPException as e:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv
def should_ignore_error(self, error):
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns `True` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
def make_response(self, rv):
"""Converts the return value from a view function to a real
response object that is an instance of :attr:`response_class`.
The following types are allowed for `rv`:
.. tabularcolumns:: |p{3.5cm}|p{9.5cm}|
======================= ===========================================
:attr:`response_class` the object is returned unchanged
:class:`str` a response object is created with the
string as body
:class:`unicode` a response object is created with the
string encoded to utf-8 as body
a WSGI function the function is called as WSGI application
and buffered as response object
:class:`tuple` A tuple in the form ``(response, status,
headers)`` where `response` is any of the
types defined here, `status` is a string
or an integer and `headers` is a list of
a dictionary with header values.
======================= ===========================================
:param rv: the return value from the view function
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
if rv is None:
raise ValueError('View function did not return a response')
if not isinstance(rv, self.response_class):
# When we create a response object directly, we let the constructor
# set the headers and status. We do this because there can be
# some extra logic involved when creating these objects with
# specific values (like default content type selection).
if isinstance(rv, (text_type, bytes, bytearray)):
rv = self.response_class(rv, headers=headers, status=status)
headers = status = None
else:
rv = self.response_class.force_type(rv, request.environ)
if status is not None:
if isinstance(status, string_types):
rv.status = status
else:
rv.status_code = status
if headers:
rv.headers.extend(headers)
return rv
def create_url_adapter(self, request):
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set up
so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
"""
if request is not None:
return self.url_map.bind_to_environ(request.environ,
server_name=self.config['SERVER_NAME'])
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.rsplit('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values)
def handle_url_build_error(self, error, endpoint, values):
"""Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
"""
exc_type, exc_value, tb = sys.exc_info()
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
if rv is not None:
return rv
except BuildError as error:
pass
# At this point we want to reraise the exception. If the error is
# still the same one we can reraise it with the original traceback,
# otherwise we raise it from here.
if error is exc_value:
reraise(exc_type, exc_value, tb)
raise error
def preprocess_request(self):
"""Called before the actual request dispatching and will
call every as :meth:`before_request` decorated function.
If any of these function returns a value it's handled as
if it was the return value from the view and further
request handling is stopped.
This also triggers the :meth:`url_value_processor` functions before
the actual :meth:`before_request` functions are called.
"""
bp = _request_ctx_stack.top.request.blueprint
funcs = self.url_value_preprocessors.get(None, ())
if bp is not None and bp in self.url_value_preprocessors:
funcs = chain(funcs, self.url_value_preprocessors[bp])
for func in funcs:
func(request.endpoint, request.view_args)
funcs = self.before_request_funcs.get(None, ())
if bp is not None and bp in self.before_request_funcs:
funcs = chain(funcs, self.before_request_funcs[bp])
for func in funcs:
rv = func()
if rv is not None:
return rv
def process_response(self, response):
"""Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the :meth:`after_request` decorated functions.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
:return: a new response object or the same, has to be an
instance of :attr:`response_class`.
"""
ctx = _request_ctx_stack.top
bp = ctx.request.blueprint
funcs = ctx._after_request_functions
if bp is not None and bp in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
if None in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[None]))
for handler in funcs:
response = handler(response)
if not self.session_interface.is_null_session(ctx.session):
self.save_session(ctx.session, response)
return response
def do_teardown_request(self, exc=None):
"""Called after the actual request dispatching and will
call every as :meth:`teardown_request` decorated function. This is
not actually called by the :class:`Flask` object itself but is always
triggered when the request context is popped. That way we have a
tighter control over certain resources under testing environments.
.. versionchanged:: 0.9
Added the `exc` argument. Previously this was always using the
current exception information.
"""
if exc is None:
exc = sys.exc_info()[1]
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
for func in funcs:
rv = func(exc)
request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(self, exc=None):
"""Called when an application context is popped. This works pretty
much the same as :meth:`do_teardown_request` but for the application
context.
.. versionadded:: 0.9
"""
if exc is None:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
func(exc)
appcontext_tearing_down.send(self, exc=exc)
def app_context(self):
"""Binds the application only. For as long as the application is bound
to the current context the :data:`flask.current_app` points to that
application. An application context is automatically created when a
request context is pushed if necessary.
Example usage::
with app.app_context():
...
.. versionadded:: 0.9
"""
return AppContext(self)
def request_context(self, environ):
"""Creates a :class:`~flask.ctx.RequestContext` from the given
environment and binds it to the current context. This must be used in
combination with the `with` statement because the request is only bound
to the current context for the duration of the `with` block.
Example usage::
with app.request_context(environ):
do_something_with(request)
The object returned can also be used without the `with` statement
which is useful for working in the shell. The example above is
doing exactly the same as this code::
ctx = app.request_context(environ)
ctx.push()
try:
do_something_with(request)
finally:
ctx.pop()
.. versionchanged:: 0.3
Added support for non-with statement usage and `with` statement
is now passed the ctx object.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def test_request_context(self, *args, **kwargs):
"""Creates a WSGI environment from the given values (see
:func:`werkzeug.test.EnvironBuilder` for more information, this
function accepts the same arguments).
"""
from flask.testing import make_test_environ_builder
builder = make_test_environ_builder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
`__call__` so that middlewares can be applied without losing a
reference to the class. So instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
The behavior of the before and after request callbacks was changed
under error conditions and a new callback was added that will
always execute at the end of the request, independent on if an
error occurred or not. See :ref:`callbacks-and-errors`.
:param environ: a WSGI environment
:param start_response: a callable accepting a status code,
a list of headers and an optional
exception context to start the response
"""
ctx = self.request_context(environ)
ctx.push()
error = None
try:
try:
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.make_response(self.handle_exception(e))
return response(environ, start_response)
finally:
if self.should_ignore_error(error):
error = None
ctx.auto_pop(error)
@property
def modules(self):
from warnings import warn
warn(DeprecationWarning('Flask.modules is deprecated, use '
'Flask.blueprints instead'), stacklevel=2)
return self.blueprints
def __call__(self, environ, start_response):
"""Shortcut for :attr:`wsgi_app`."""
return self.wsgi_app(environ, start_response)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name,
)
|
SambitAcharya/coala | refs/heads/master | bears/coffee_script/__init__.py | 12133432 | |
burzillibus/RobHome | refs/heads/master | venv/lib/python2.7/site-packages/django/conf/locale/nb/__init__.py | 12133432 | |
kharkevich/molecule | refs/heads/master | test/functional/__init__.py | 12133432 | |
hashems/Mobile-Cloud-Development-Projects | refs/heads/master | appengine/flexible/static_files/main_test.py | 9 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import main
def test_index():
main.app.testing = True
client = main.app.test_client()
r = client.get('/')
assert r.status_code == 200
r = client.get('/static/main.css')
assert r.status_code == 200
|
remap/ndn-flow | refs/heads/master | framework/ndn_iot_js/.waf-1.7.9-ce9b2a6d663b32d9a89df143dad45f99/waflib/Tools/intltool.py | 330 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re
from waflib import Configure,TaskGen,Task,Utils,Runner,Options,Build,Logs
import waflib.Tools.ccroot
from waflib.TaskGen import feature,before_method
from waflib.Logs import error
@before_method('process_source')
@feature('intltool_in')
def apply_intltool_in_f(self):
try:self.meths.remove('process_source')
except ValueError:pass
if not self.env.LOCALEDIR:
self.env.LOCALEDIR=self.env.PREFIX+'/share/locale'
for i in self.to_list(self.source):
node=self.path.find_resource(i)
podir=getattr(self,'podir','po')
podirnode=self.path.find_dir(podir)
if not podirnode:
error("could not find the podir %r"%podir)
continue
cache=getattr(self,'intlcache','.intlcache')
self.env['INTLCACHE']=os.path.join(self.path.bldpath(),podir,cache)
self.env['INTLPODIR']=podirnode.bldpath()
self.env['INTLFLAGS']=getattr(self,'flags',['-q','-u','-c'])
task=self.create_task('intltool',node,node.change_ext(''))
inst=getattr(self,'install_path','${LOCALEDIR}')
if inst:
self.bld.install_files(inst,task.outputs)
@feature('intltool_po')
def apply_intltool_po(self):
	"""Compile the .po catalogs listed in po/LINGUAS (feature ``intltool_po``).

	For every language named in the LINGUAS file, a ``po`` task compiles
	``<lang>.po`` into ``<lang>.mo``, which is installed as
	``<install_path>/<lang>/LC_MESSAGES/<appname>.mo``.
	"""
	try:self.meths.remove('process_source')
	except ValueError:pass
	if not self.env.LOCALEDIR:
		self.env.LOCALEDIR=self.env.PREFIX+'/share/locale'
	appname=getattr(self,'appname','set_your_app_name')
	podir=getattr(self,'podir','')
	inst=getattr(self,'install_path','${LOCALEDIR}')
	linguas=self.path.find_node(os.path.join(podir,'LINGUAS'))
	if linguas:
		# LINGUAS lists language codes, one or more per line; '#' starts a comment line.
		file=open(linguas.abspath())
		langs=[]
		for line in file.readlines():
			if not line.startswith('#'):
				langs+=line.split()
		file.close()
		re_linguas=re.compile('[-a-zA-Z_@.]+')
		for lang in langs:
			if re_linguas.match(lang):
				node=self.path.find_resource(os.path.join(podir,re_linguas.match(lang).group()+'.po'))
				task=self.create_task('po',node,node.change_ext('.mo'))
				if inst:
					filename=task.outputs[0].name
					(langname,ext)=os.path.splitext(filename)
					# Install layout: <inst>/<lang>/LC_MESSAGES/<appname>.mo
					inst_file=inst+os.sep+langname+os.sep+'LC_MESSAGES'+os.sep+appname+'.mo'
					self.bld.install_as(inst_file,task.outputs[0],chmod=getattr(self,'chmod',Utils.O644),env=task.env)
	else:
		Logs.pprint('RED',"Error no LINGUAS file found in po directory")
class po(Task.Task):
	"""Compile a .po translation catalog into a binary .mo file with msgfmt."""
	run_str='${MSGFMT} -o ${TGT} ${SRC}'
	color='BLUE'
class intltool(Task.Task):
	"""Merge translations into a template file using intltool-merge."""
	run_str='${INTLTOOL} ${INTLFLAGS} ${INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}'
	color='BLUE'
def configure(conf):
	"""Detect msgfmt and intltool-merge; define LOCALEDIR and DATADIR."""
	conf.find_program('msgfmt',var='MSGFMT')
	conf.find_perl_program('intltool-merge',var='INTLTOOL')
	prefix=conf.env.PREFIX
	datadir=conf.env.DATADIR
	if not datadir:
		datadir=os.path.join(prefix,'share')
	# Double the backslashes so the values are safe inside C string literals.
	conf.define('LOCALEDIR',os.path.join(datadir,'locale').replace('\\','\\\\'))
	conf.define('DATADIR',datadir.replace('\\','\\\\'))
	# Only check for locale.h when a C/C++ compiler is configured.
	if conf.env.CC or conf.env.CXX:
		conf.check(header_name='locale.h')
|
wfxiang08/django185 | refs/heads/master | django/core/cache/backends/locmem.py | 586 | "Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
# Expiry timestamps, parallel to _caches; a value of None means "never expires".
_expire_info = {}
# One RWLock per named cache, shared by every instance using that name.
_locks = {}
@contextmanager
def dummy():
    """No-op context manager, used when the caller already holds the lock."""
    # Nothing to acquire or release -- just hand control back to the body.
    yield None
class LocMemCache(BaseCache):
    """Thread-safe, in-memory cache backend local to the current process.

    Data, expiry times and locks live in module-level dicts keyed by cache
    name, so every LocMemCache instance created with the same name shares
    the same store.  Values are stored pickled, isolating cached objects
    from later mutation by callers.
    """
    def __init__(self, name, params):
        BaseCache.__init__(self, params)
        # setdefault() ensures same-named caches share one store and lock.
        self._cache = _caches.setdefault(name, {})
        self._expire_info = _expire_info.setdefault(name, {})
        self._lock = _locks.setdefault(name, RWLock())
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Store only if the key is absent or expired; return True on success."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        with self._lock.writer():
            if self._has_expired(key):
                self._set(key, pickled, timeout)
                return True
            return False
    def get(self, key, default=None, version=None, acquire_lock=True):
        """Return the cached value, or ``default`` if missing/expired.

        ``acquire_lock=False`` is used internally (see incr()) when the
        caller already holds the write lock.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        pickled = None
        with (self._lock.reader() if acquire_lock else dummy()):
            if not self._has_expired(key):
                pickled = self._cache[key]
        if pickled is not None:
            try:
                return pickle.loads(pickled)
            except pickle.PickleError:
                return default
        # Expired entry: purge it under the write lock before returning.
        with (self._lock.writer() if acquire_lock else dummy()):
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return default
    def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
        # Caller must already hold the write lock.
        if len(self._cache) >= self._max_entries:
            self._cull()
        self._cache[key] = value
        self._expire_info[key] = self.get_backend_timeout(timeout)
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Unconditionally store ``value`` under ``key``."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        with self._lock.writer():
            self._set(key, pickled, timeout)
    def incr(self, key, delta=1, version=None):
        """Atomically add ``delta`` to an existing key; ValueError if missing."""
        with self._lock.writer():
            # acquire_lock=False: we already hold the write lock here.
            value = self.get(key, version=version, acquire_lock=False)
            if value is None:
                raise ValueError("Key '%s' not found" % key)
            new_value = value + delta
            key = self.make_key(key, version=version)
            pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
            self._cache[key] = pickled
        return new_value
    def has_key(self, key, version=None):
        """Return True if the key exists and has not expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.reader():
            if not self._has_expired(key):
                return True
        # Expired: remove the stale entry before reporting False.
        with self._lock.writer():
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return False
    def _has_expired(self, key):
        # exp of None means "cache forever"; a missing key (-1) counts as expired.
        exp = self._expire_info.get(key, -1)
        if exp is None or exp > time.time():
            return False
        return True
    def _cull(self):
        # Evict every _cull_frequency-th entry; a frequency of 0 clears everything.
        if self._cull_frequency == 0:
            self.clear()
        else:
            doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
            for k in doomed:
                self._delete(k)
    def _delete(self, key):
        # Remove value and expiry info, tolerating a concurrent removal.
        try:
            del self._cache[key]
        except KeyError:
            pass
        try:
            del self._expire_info[key]
        except KeyError:
            pass
    def delete(self, key, version=None):
        """Remove ``key`` from the cache (no-op when absent)."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.writer():
            self._delete(key)
    def clear(self):
        """Remove all keys and expiry metadata for this named cache."""
        self._cache.clear()
        self._expire_info.clear()
|
omprakasha/odoo | refs/heads/8.0 | addons/website_blog/controllers/__init__.py | 7372 | import main
|
mehdisadeghi/saga-python | refs/heads/master | tests/adaptors/irods/irods_test.py | 5 | #!/usr/bin/env python2.4
# -*- coding: utf-8 -*-
'''This example runs some iRODS commands
If something doesn't work as expected, try to set
SAGA_VERBOSE=3 in your environment before you run the
script in order to get some debug output.
If you think you have encountered a defect, please
report it at: https://github.com/saga-project/saga-python/issues
'''
# Script authorship metadata.
__author__ = "Ashley Zebrowski"
__copyright__ = "Copyright 2012, Ashley Zebrowski"
__license__ = "MIT"
import sys, time
import saga
import os
FILE_SIZE = 1 # size of the temporary test file, in megabytes (approx)
NUM_REPLICAS = 5 # number of iRODS replicas to create during the test
TEMP_FILENAME = "test.txt" # filename created locally and used on iRODS
TEMP_DIR = "/irods_test_dir/" # directory created (and removed) during the test
IRODS_DIRECTORY = "/osg/home/azebro1/" # iRODS directory holding the test files; keep the leading and trailing /
IRODS_RESOURCE = "osgGridFtpGroup" # iRODS resource or resource group to upload files to
def main():
    """Exercise SAGA's iRODS replica adaptor end to end (Python 2 script).

    Creates a local temp file, uploads it to iRODS, replicates it,
    downloads it again and finally removes both the remote file and the
    temporary directory.  Requires a working iRODS setup and the icommands
    (``irm``) on PATH -- this is an integration script, not a unit test.
    """
    try:
        #myfile = saga.logicalfile.LogicalFile('irods://'+IRODS_DIRECTORY+TEMP_FILENAME)
        #myfile = saga.replica.LogicalFile('irods://'+IRODS_DIRECTORY+TEMP_FILENAME)
        #myfile.add_location("irods:////data/cache/AGLT2_CE_2_FTPplaceholder/whatever?resource=AGLT2_CE_2_FTP")
        #myfile.add_location("irods:///osg/home/azebro1/test_file?resource=AGLT2_CE_2_FTP")
        # grab our home directory (tested on Linux)
        home_dir = os.path.expanduser("~"+"/")
        print "Creating temporary file of size %dM : %s" % \
              (FILE_SIZE, home_dir+TEMP_FILENAME)
        # create a file for us to use with iRODS
        with open(home_dir+TEMP_FILENAME, "wb") as f:
            f.write ("x" * (FILE_SIZE * pow(2,20)) )
        print "Creating iRODS directory object"
        mydir = saga.replica.LogicalDirectory("irods://localhost/" + IRODS_DIRECTORY)
        # Remove any leftover remote copy from a previous run (best effort).
        import subprocess
        subprocess.call(["irm", IRODS_DIRECTORY+TEMP_FILENAME])
        print "Uploading file to iRODS"
        myfile = saga.replica.LogicalFile('irods://'+IRODS_DIRECTORY+TEMP_FILENAME)
        # The path portion of the target URL is ignored; only ?resource= matters.
        myfile.upload(home_dir + TEMP_FILENAME, \
                      "irods:///this/path/is/ignored/?resource="+IRODS_RESOURCE)
        print "Deleting file locally : %s" % (home_dir + TEMP_FILENAME)
        os.remove(home_dir + TEMP_FILENAME)
        print "Printing iRODS directory listing for %s " % ("irods://" + IRODS_DIRECTORY)
        for entry in mydir.list():
            print entry
        print "Creating iRODS file object"
        myfile = saga.replica.LogicalFile('irods://' + IRODS_DIRECTORY+TEMP_FILENAME)
        print "Size of test file %s on iRODS in bytes:" % (IRODS_DIRECTORY + TEMP_FILENAME)
        print myfile.get_size()
        print "Creating",NUM_REPLICAS,"replicas for",IRODS_DIRECTORY+TEMP_FILENAME
        for i in range(NUM_REPLICAS):
            myfile.replicate("irods:///this/path/is/ignored/?resource="+IRODS_RESOURCE)
        print "Locations the file is stored at on iRODS:"
        for entry in myfile.list_locations():
            print entry
        print "Downloading logical file %s to current/default directory" % \
            (IRODS_DIRECTORY + TEMP_FILENAME)
        myfile.download(TEMP_FILENAME)
        import time
        print "Downloading logical file %s to /tmp/" % \
            (IRODS_DIRECTORY + TEMP_FILENAME)
        myfile.download("/tmp/")
        #exit(0)
        print "Deleting downloaded file locally : %s" % (os.getcwd() + TEMP_FILENAME)
        #os.remove(os.getcwd() +"/" + TEMP_FILENAME)
        print "Deleting downloaded file locally : %s" % ("/tmp" + TEMP_FILENAME)
        #os.remove("/tmp/" + TEMP_FILENAME)
        print "Making test dir %s on iRODS" % (IRODS_DIRECTORY+TEMP_DIR)
        mydir.make_dir("irods://"+IRODS_DIRECTORY+TEMP_DIR)
        #commented because iRODS install on gw68 doesn't support move
        #print "Moving file to %s test dir on iRODS" % (IRODS_DIRECTORY+TEMP_DIR)
        #myfile.move("irods://"+IRODS_DIRECTORY+TEMP_DIR)
        print "Deleting test dir %s from iRODS" % (IRODS_DIRECTORY+TEMP_DIR)
        mydir.remove("irods://"+IRODS_DIRECTORY+TEMP_DIR)
        print "Deleting file %s from iRODS" % (IRODS_DIRECTORY+TEMP_FILENAME)
        myfile.remove()
        print "iRODS test script finished execution"
    except saga.SagaException, ex:
        # Report the failure with a full traceback; the script does not re-raise.
        print "An error occured while executing the test script! %s" % (str(ex))
        import traceback
        print traceback.format_exc()
if __name__ == "__main__":
    # Run the iRODS integration checks when invoked as a script.
    main()
|
JavML/django | refs/heads/master | tests/serializers/tests.py | 61 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import json
import re
import unittest
from datetime import datetime
from xml.dom import minidom
from django.core import management, serializers
from django.core.serializers.base import ProgressBar
from django.db import connection, transaction
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipUnlessDBFeature,
)
from django.test.utils import Approximate
from django.utils import six
from django.utils.six import StringIO
from .models import (
Actor, Article, Author, AuthorProfile, Category, Movie, Player, Score,
Team,
)
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
@override_settings(
    SERIALIZATION_MODULES={
        "json2": "django.core.serializers.json",
    }
)
class SerializerRegistrationTests(SimpleTestCase):
    """Tests for registering and unregistering serializer formats (refs #14823)."""
    def setUp(self):
        # Run against an empty registry; tearDown restores the real one.
        self.old_serializers = serializers._serializers
        serializers._serializers = {}
    def tearDown(self):
        serializers._serializers = self.old_serializers
    def test_register(self):
        "Registering a new serializer populates the full registry. Refs #14823"
        serializers.register_serializer('json3', 'django.core.serializers.json')
        public_formats = serializers.get_public_serializer_formats()
        self.assertIn('json3', public_formats)
        self.assertIn('json2', public_formats)
        self.assertIn('xml', public_formats)
    def test_unregister(self):
        "Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"
        serializers.unregister_serializer('xml')
        serializers.register_serializer('json3', 'django.core.serializers.json')
        public_formats = serializers.get_public_serializer_formats()
        self.assertNotIn('xml', public_formats)
        self.assertIn('json3', public_formats)
    def test_builtin_serializers(self):
        "Requesting a list of serializer formats populates the registry"
        all_formats = set(serializers.get_serializer_formats())
        public_formats = set(serializers.get_public_serializer_formats())
        # Note: a stray trailing comma previously wrapped the first assert in a
        # one-element tuple; harmless at runtime, removed for clarity.
        self.assertIn('xml', all_formats)
        self.assertIn('xml', public_formats)
        self.assertIn('json2', all_formats)
        self.assertIn('json2', public_formats)
        self.assertIn('python', all_formats)
        # The "python" serializer is internal and not publicly advertised.
        self.assertNotIn('python', public_formats)
class SerializersTestBase(object):
    """Format-agnostic serializer tests, mixed into one TestCase per format.

    Concrete subclasses set ``serializer_name`` plus the string fixtures
    (``pkless_str``, ``mapping_ordering_str``) and implement the
    ``_validate_output`` / ``_get_pk_values`` / ``_get_field_values`` hooks.
    """
    @staticmethod
    def _comparison_value(value):
        # Hook for formats (e.g. XML) that compare everything as strings.
        return value
    def setUp(self):
        # Shared fixture: three categories, two authors, two articles.
        sports = Category.objects.create(name="Sports")
        music = Category.objects.create(name="Music")
        op_ed = Category.objects.create(name="Op-Ed")
        self.joe = Author.objects.create(name="Joe")
        self.jane = Author.objects.create(name="Jane")
        self.a1 = Article(
            author=self.jane,
            headline="Poker has no place on ESPN",
            pub_date=datetime(2006, 6, 16, 11, 00)
        )
        self.a1.save()
        self.a1.categories = [sports, op_ed]
        self.a2 = Article(
            author=self.joe,
            headline="Time to reform copyright",
            pub_date=datetime(2006, 6, 16, 13, 00, 11, 345)
        )
        self.a2.save()
        self.a2.categories = [music, op_ed]
    def test_serialize(self):
        """Tests that basic serialization works."""
        serial_str = serializers.serialize(self.serializer_name,
                                           Article.objects.all())
        self.assertTrue(self._validate_output(serial_str))
    def test_serializer_roundtrip(self):
        """Tests that serialized content can be deserialized."""
        serial_str = serializers.serialize(self.serializer_name,
                                           Article.objects.all())
        models = list(serializers.deserialize(self.serializer_name, serial_str))
        self.assertEqual(len(models), 2)
    def test_altering_serialized_output(self):
        """
        Tests the ability to create new objects by
        modifying serialized content.
        """
        old_headline = "Poker has no place on ESPN"
        new_headline = "Poker has no place on television"
        serial_str = serializers.serialize(self.serializer_name,
                                           Article.objects.all())
        serial_str = serial_str.replace(old_headline, new_headline)
        models = list(serializers.deserialize(self.serializer_name, serial_str))
        # Prior to saving, old headline is in place
        self.assertTrue(Article.objects.filter(headline=old_headline))
        self.assertFalse(Article.objects.filter(headline=new_headline))
        for model in models:
            model.save()
        # After saving, new headline is in place
        self.assertTrue(Article.objects.filter(headline=new_headline))
        self.assertFalse(Article.objects.filter(headline=old_headline))
    def test_one_to_one_as_pk(self):
        """
        Tests that if you use your own primary key field
        (such as a OneToOneField), it doesn't appear in the
        serialized field list - it replaces the pk identifier.
        """
        profile = AuthorProfile(author=self.joe,
                                date_of_birth=datetime(1970, 1, 1))
        profile.save()
        serial_str = serializers.serialize(self.serializer_name,
                                           AuthorProfile.objects.all())
        self.assertFalse(self._get_field_values(serial_str, 'author'))
        for obj in serializers.deserialize(self.serializer_name, serial_str):
            self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
    def test_serialize_field_subset(self):
        """Tests that output can be restricted to a subset of fields"""
        valid_fields = ('headline', 'pub_date')
        invalid_fields = ("author", "categories")
        serial_str = serializers.serialize(self.serializer_name,
                                           Article.objects.all(),
                                           fields=valid_fields)
        for field_name in invalid_fields:
            self.assertFalse(self._get_field_values(serial_str, field_name))
        for field_name in valid_fields:
            self.assertTrue(self._get_field_values(serial_str, field_name))
    def test_serialize_unicode(self):
        """Tests that unicode makes the roundtrip intact"""
        actor_name = "Za\u017c\u00f3\u0142\u0107"
        movie_title = 'G\u0119\u015bl\u0105 ja\u017a\u0144'
        ac = Actor(name=actor_name)
        mv = Movie(title=movie_title, actor=ac)
        ac.save()
        mv.save()
        serial_str = serializers.serialize(self.serializer_name, [mv])
        self.assertEqual(self._get_field_values(serial_str, "title")[0], movie_title)
        self.assertEqual(self._get_field_values(serial_str, "actor")[0], actor_name)
        obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
        mv_obj = obj_list[0].object
        self.assertEqual(mv_obj.title, movie_title)
    def test_serialize_progressbar(self):
        # Passing progress_output + object_count should render a progress bar.
        fake_stdout = StringIO()
        serializers.serialize(
            self.serializer_name, Article.objects.all(),
            progress_output=fake_stdout, object_count=Article.objects.count()
        )
        self.assertTrue(
            fake_stdout.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n')
        )
    def test_serialize_superfluous_queries(self):
        """Ensure no superfluous queries are made when serializing ForeignKeys
        #17602
        """
        ac = Actor(name='Actor name')
        ac.save()
        mv = Movie(title='Movie title', actor_id=ac.pk)
        mv.save()
        with self.assertNumQueries(0):
            serializers.serialize(self.serializer_name, [mv])
    def test_serialize_with_null_pk(self):
        """
        Tests that serialized data with no primary key results
        in a model instance with no id
        """
        category = Category(name="Reference")
        serial_str = serializers.serialize(self.serializer_name, [category])
        pk_value = self._get_pk_values(serial_str)[0]
        self.assertFalse(pk_value)
        cat_obj = list(serializers.deserialize(self.serializer_name,
                                               serial_str))[0].object
        self.assertEqual(cat_obj.id, None)
    def test_float_serialization(self):
        """Tests that float values serialize and deserialize intact"""
        sc = Score(score=3.4)
        sc.save()
        serial_str = serializers.serialize(self.serializer_name, [sc])
        deserial_objs = list(serializers.deserialize(self.serializer_name,
                                                     serial_str))
        self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
    def test_deferred_field_serialization(self):
        # A deferred instance must deserialize back to the real model class.
        author = Author.objects.create(name='Victor Hugo')
        author = Author.objects.defer('name').get(pk=author.pk)
        serial_str = serializers.serialize(self.serializer_name, [author])
        deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
        # Check the class instead of using isinstance() because model instances
        # with deferred fields (e.g. Author_Deferred_name) will pass isinstance.
        self.assertEqual(deserial_objs[0].object.__class__, Author)
    def test_custom_field_serialization(self):
        """Tests that custom fields serialize and deserialize intact"""
        team_str = "Spartak Moskva"
        player = Player()
        player.name = "Soslan Djanaev"
        player.rank = 1
        player.team = Team(team_str)
        player.save()
        serial_str = serializers.serialize(self.serializer_name,
                                           Player.objects.all())
        team = self._get_field_values(serial_str, "team")
        self.assertTrue(team)
        self.assertEqual(team[0], team_str)
        deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
        self.assertEqual(deserial_objs[0].object.team.to_string(),
                         player.team.to_string())
    def test_pre_1000ad_date(self):
        """Tests that year values before 1000AD are properly formatted"""
        # Regression for #12524 -- dates before 1000AD get prefixed
        # 0's on the year
        a = Article.objects.create(
            author=self.jane,
            headline="Nobody remembers the early years",
            pub_date=datetime(1, 2, 3, 4, 5, 6))
        serial_str = serializers.serialize(self.serializer_name, [a])
        date_values = self._get_field_values(serial_str, "pub_date")
        self.assertEqual(date_values[0].replace('T', ' '), "0001-02-03 04:05:06")
    def test_pkless_serialized_strings(self):
        """
        Tests that serialized strings without PKs
        can be turned into models
        """
        deserial_objs = list(serializers.deserialize(self.serializer_name,
                                                     self.pkless_str))
        for obj in deserial_objs:
            self.assertFalse(obj.object.id)
            obj.save()
        self.assertEqual(Category.objects.all().count(), 5)
    def test_deterministic_mapping_ordering(self):
        """Mapping such as fields should be deterministically ordered. (#24558)"""
        output = serializers.serialize(self.serializer_name, [self.a1], indent=2)
        categories = self.a1.categories.values_list('pk', flat=True)
        self.assertEqual(output, self.mapping_ordering_str % {
            'article_pk': self.a1.pk,
            'author_pk': self.a1.author_id,
            'first_category_pk': categories[0],
            'second_category_pk': categories[1],
        })
    def test_deserialize_force_insert(self):
        """Tests that deserialized content can be saved with force_insert as a parameter."""
        serial_str = serializers.serialize(self.serializer_name, [self.a1])
        deserial_obj = list(serializers.deserialize(self.serializer_name, serial_str))[0]
        with mock.patch('django.db.models.Model') as mock_model:
            deserial_obj.save(force_insert=False)
            mock_model.save_base.assert_called_with(deserial_obj.object, raw=True, using=None, force_insert=False)
class SerializersTransactionTestBase(object):
    """Forward-reference tests; must run in a real transaction (TransactionTestCase)."""
    available_apps = ['serializers']
    @skipUnlessDBFeature('supports_forward_references')
    def test_forward_refs(self):
        """
        Tests that objects ids can be referenced before they are
        defined in the serialization data.
        """
        # The deserialization process needs to run in a transaction in order
        # to test forward reference handling.
        with transaction.atomic():
            objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
            with connection.constraint_checks_disabled():
                for obj in objs:
                    obj.save()
        for model_cls in (Category, Author, Article):
            self.assertEqual(model_cls.objects.all().count(), 1)
        art_obj = Article.objects.all()[0]
        self.assertEqual(art_obj.categories.all().count(), 1)
        self.assertEqual(art_obj.author.name, "Agnes")
class XmlSerializerTestCase(SerializersTestBase, TestCase):
    """Runs the shared serializer test suite against the XML serializer."""
    serializer_name = "xml"
    pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
    <object model="serializers.category">
        <field type="CharField" name="name">Reference</field>
    </object>
    <object model="serializers.category">
        <field type="CharField" name="name">Non-fiction</field>
    </object>
</django-objects>"""
    mapping_ordering_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
  <object model="serializers.article" pk="%(article_pk)s">
    <field name="author" rel="ManyToOneRel" to="serializers.author">%(author_pk)s</field>
    <field name="headline" type="CharField">Poker has no place on ESPN</field>
    <field name="pub_date" type="DateTimeField">2006-06-16T11:00:00</field>
    <field name="categories" rel="ManyToManyRel" to="serializers.category"><object pk="%(first_category_pk)s"></object><object pk="%(second_category_pk)s"></object></field>
    <field name="meta_data" rel="ManyToManyRel" to="serializers.categorymetadata"></field>
  </object>
</django-objects>"""  # NOQA
    @staticmethod
    def _comparison_value(value):
        # The XML serializer handles everything as strings, so comparisons
        # need to be performed on the stringified value
        return six.text_type(value)
    @staticmethod
    def _validate_output(serial_str):
        # Valid output is anything minidom can parse.
        try:
            minidom.parseString(serial_str)
        except Exception:
            return False
        else:
            return True
    @staticmethod
    def _get_pk_values(serial_str):
        # Collect the pk attribute of every <object> element.
        ret_list = []
        dom = minidom.parseString(serial_str)
        fields = dom.getElementsByTagName("object")
        for field in fields:
            ret_list.append(field.getAttribute("pk"))
        return ret_list
    @staticmethod
    def _get_field_values(serial_str, field_name):
        # Collect the text content of every <field name="field_name"> element.
        ret_list = []
        dom = minidom.parseString(serial_str)
        fields = dom.getElementsByTagName("field")
        for field in fields:
            if field.getAttribute("name") == field_name:
                temp = []
                for child in field.childNodes:
                    temp.append(child.nodeValue)
                ret_list.append("".join(temp))
        return ret_list
    def test_control_char_failure(self):
        """
        Serializing control characters with XML should fail as those characters
        are not supported in the XML 1.0 standard (except HT, LF, CR).
        """
        self.a1.headline = "This contains \u0001 control \u0011 chars"
        msg = "Article.headline (pk:%s) contains unserializable characters" % self.a1.pk
        with self.assertRaisesMessage(ValueError, msg):
            serializers.serialize(self.serializer_name, [self.a1])
        self.a1.headline = "HT \u0009, LF \u000A, and CR \u000D are allowed"
        self.assertIn(
            "HT \t, LF \n, and CR \r are allowed",
            serializers.serialize(self.serializer_name, [self.a1])
        )
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
    """Forward-reference handling for the XML serializer."""
    serializer_name = "xml"
    fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
    <object pk="1" model="serializers.article">
        <field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
        <field type="CharField" name="headline">Forward references pose no problem</field>
        <field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
        <field to="serializers.category" name="categories" rel="ManyToManyRel">
            <object pk="1"></object>
        </field>
        <field to="serializers.categorymetadata" name="meta_data" rel="ManyToManyRel"></field>
    </object>
    <object pk="1" model="serializers.author">
        <field type="CharField" name="name">Agnes</field>
    </object>
    <object pk="1" model="serializers.category">
        <field type="CharField" name="name">Reference</field></object>
</django-objects>"""
class JsonSerializerTestCase(SerializersTestBase, TestCase):
    """Runs the shared serializer test suite against the JSON serializer."""
    serializer_name = "json"
    pkless_str = """[
    {
        "pk": null,
        "model": "serializers.category",
        "fields": {"name": "Reference"}
    }, {
        "model": "serializers.category",
        "fields": {"name": "Non-fiction"}
    }]"""
    mapping_ordering_str = """[
{
    "model": "serializers.article",
    "pk": %(article_pk)s,
    "fields": {
        "author": %(author_pk)s,
        "headline": "Poker has no place on ESPN",
        "pub_date": "2006-06-16T11:00:00",
        "categories": [
            %(first_category_pk)s,
            %(second_category_pk)s
        ],
        "meta_data": []
    }
}
]
"""
    @staticmethod
    def _validate_output(serial_str):
        # Valid output is anything json.loads can parse.
        try:
            json.loads(serial_str)
        except Exception:
            return False
        else:
            return True
    @staticmethod
    def _get_pk_values(serial_str):
        ret_list = []
        serial_list = json.loads(serial_str)
        for obj_dict in serial_list:
            ret_list.append(obj_dict["pk"])
        return ret_list
    @staticmethod
    def _get_field_values(serial_str, field_name):
        ret_list = []
        serial_list = json.loads(serial_str)
        for obj_dict in serial_list:
            if field_name in obj_dict["fields"]:
                ret_list.append(obj_dict["fields"][field_name])
        return ret_list
    def test_indentation_whitespace(self):
        # Indented output must not carry trailing whitespace on any line.
        Score.objects.create(score=5.0)
        Score.objects.create(score=6.0)
        qset = Score.objects.all()
        s = serializers.json.Serializer()
        json_data = s.serialize(qset, indent=2)
        for line in json_data.splitlines():
            if re.search(r'.+,\s*$', line):
                self.assertEqual(line, line.rstrip())
    def test_helpful_error_message_invalid_pk(self):
        """
        If there is an invalid primary key, the error message should contain
        the model associated with it.
        """
        test_string = """[{
            "pk": "badpk",
            "model": "serializers.player",
            "fields": {
                "name": "Bob",
                "rank": 1,
                "team": "Team"
            }
        }]"""
        with self.assertRaisesMessage(serializers.base.DeserializationError, "(serializers.player:pk=badpk)"):
            list(serializers.deserialize('json', test_string))
    def test_helpful_error_message_invalid_field(self):
        """
        If there is an invalid field value, the error message should contain
        the model associated with it.
        """
        test_string = """[{
            "pk": "1",
            "model": "serializers.player",
            "fields": {
                "name": "Bob",
                "rank": "invalidint",
                "team": "Team"
            }
        }]"""
        expected = "(serializers.player:pk=1) field_value was 'invalidint'"
        with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
            list(serializers.deserialize('json', test_string))
    def test_helpful_error_message_for_foreign_keys(self):
        """
        Invalid foreign keys with a natural key should throw a helpful error
        message, such as what the failing key is.
        """
        test_string = """[{
            "pk": 1,
            "model": "serializers.category",
            "fields": {
                "name": "Unknown foreign key",
                "meta_data": [
                    "doesnotexist",
                    "metadata"
                ]
            }
        }]"""
        key = ["doesnotexist", "metadata"]
        expected = "(serializers.category:pk=1) field_value was '%r'" % key
        with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
            list(serializers.deserialize('json', test_string))
    def test_helpful_error_message_for_many2many_non_natural(self):
        """
        Invalid many-to-many keys should throw a helpful error message.
        """
        test_string = """[{
            "pk": 1,
            "model": "serializers.article",
            "fields": {
                "author": 1,
                "headline": "Unknown many to many",
                "pub_date": "2014-09-15T10:35:00",
                "categories": [1, "doesnotexist"]
            }
        }, {
            "pk": 1,
            "model": "serializers.author",
            "fields": {
                "name": "Agnes"
            }
        }, {
            "pk": 1,
            "model": "serializers.category",
            "fields": {
                "name": "Reference"
            }
        }]"""
        expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
        with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
            list(serializers.deserialize('json', test_string))
    def test_helpful_error_message_for_many2many_natural1(self):
        """
        Invalid many-to-many keys should throw a helpful error message.
        This tests the code path where one of a list of natural keys is invalid.
        """
        test_string = """[{
            "pk": 1,
            "model": "serializers.categorymetadata",
            "fields": {
                "kind": "author",
                "name": "meta1",
                "value": "Agnes"
            }
        }, {
            "pk": 1,
            "model": "serializers.article",
            "fields": {
                "author": 1,
                "headline": "Unknown many to many",
                "pub_date": "2014-09-15T10:35:00",
                "meta_data": [
                    ["author", "meta1"],
                    ["doesnotexist", "meta1"],
                    ["author", "meta1"]
                ]
            }
        }, {
            "pk": 1,
            "model": "serializers.author",
            "fields": {
                "name": "Agnes"
            }
        }]"""
        key = ["doesnotexist", "meta1"]
        expected = "(serializers.article:pk=1) field_value was '%r'" % key
        with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
            for obj in serializers.deserialize('json', test_string):
                obj.save()
    def test_helpful_error_message_for_many2many_natural2(self):
        """
        Invalid many-to-many keys should throw a helpful error message. This
        tests the code path where a natural many-to-many key has only a single
        value.
        """
        test_string = """[{
            "pk": 1,
            "model": "serializers.article",
            "fields": {
                "author": 1,
                "headline": "Unknown many to many",
                "pub_date": "2014-09-15T10:35:00",
                "meta_data": [1, "doesnotexist"]
            }
        }, {
            "pk": 1,
            "model": "serializers.categorymetadata",
            "fields": {
                "kind": "author",
                "name": "meta1",
                "value": "Agnes"
            }
        }, {
            "pk": 1,
            "model": "serializers.author",
            "fields": {
                "name": "Agnes"
            }
        }]"""
        expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
        with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
            for obj in serializers.deserialize('json', test_string, ignore=False):
                obj.save()
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
    """Forward-reference handling for the JSON serializer."""
    serializer_name = "json"
    fwd_ref_str = """[
    {
        "pk": 1,
        "model": "serializers.article",
        "fields": {
            "headline": "Forward references pose no problem",
            "pub_date": "2006-06-16T15:00:00",
            "categories": [1],
            "author": 1
        }
    },
    {
        "pk": 1,
        "model": "serializers.category",
        "fields": {
            "name": "Reference"
        }
    },
    {
        "pk": 1,
        "model": "serializers.author",
        "fields": {
            "name": "Agnes"
        }
    }]"""
# Message raised by the mocked import_module to emulate a missing pyyaml.
YAML_IMPORT_ERROR_MESSAGE = r'No module named yaml'
class YamlImportModuleMock(object):
    """Wrap importlib.import_module so importing the yaml serializer fails.

    In order to run tests that verify the behavior of the YAML serializer
    when run on a system that has yaml installed (like the django CI server),
    the wrapped import_module raises ImportError only for the yaml
    serializer module and delegates everything else to the real
    implementation.  The importlib.import_module() call is made in
    serializers.register_serializer().
    Refs: #12756
    """
    def __init__(self):
        # Keep a reference to the genuine implementation for delegation.
        self._import_module = importlib.import_module
    def import_module(self, module_path):
        yaml_serializer_path = serializers.BUILTIN_SERIALIZERS['yaml']
        if module_path != yaml_serializer_path:
            return self._import_module(module_path)
        # Simulate pyyaml being absent from the system.
        raise ImportError(YAML_IMPORT_ERROR_MESSAGE)
class NoYamlSerializerTestCase(SimpleTestCase):
    """Not having pyyaml installed provides a misleading error

    Refs: #12756
    """
    @classmethod
    def setUpClass(cls):
        """Removes imported yaml and stubs importlib.import_module"""
        super(NoYamlSerializerTestCase, cls).setUpClass()
        # Swap in the mock so importing the yaml serializer raises ImportError.
        cls._import_module_mock = YamlImportModuleMock()
        importlib.import_module = cls._import_module_mock.import_module
        # clear out cached serializers to emulate yaml missing
        serializers._serializers = {}
    @classmethod
    def tearDownClass(cls):
        """Puts yaml back if necessary"""
        super(NoYamlSerializerTestCase, cls).tearDownClass()
        importlib.import_module = cls._import_module_mock._import_module
        # clear out cached serializers to clean out BadSerializer instances
        serializers._serializers = {}
    def test_serializer_pyyaml_error_message(self):
        """Using yaml serializer without pyyaml raises ImportError"""
        jane = Author(name="Jane")
        self.assertRaises(ImportError, serializers.serialize, "yaml", [jane])
    def test_deserializer_pyyaml_error_message(self):
        """Using yaml deserializer without pyyaml raises ImportError"""
        self.assertRaises(ImportError, serializers.deserialize, "yaml", "")
    def test_dumpdata_pyyaml_error_message(self):
        """Calling dumpdata produces an error when yaml package missing"""
        with six.assertRaisesRegex(self, management.CommandError, YAML_IMPORT_ERROR_MESSAGE):
            management.call_command('dumpdata', format='yaml')
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTestCase(SerializersTestBase, TestCase):
    """Runs the shared serializer test suite against the YAML serializer."""
    serializer_name = "yaml"
    fwd_ref_str = """- fields:
        headline: Forward references pose no problem
        pub_date: 2006-06-16 15:00:00
        categories: [1]
        author: 1
      pk: 1
      model: serializers.article
    - fields:
        name: Reference
      pk: 1
      model: serializers.category
    - fields:
        name: Agnes
      pk: 1
      model: serializers.author"""
    pkless_str = """- fields:
        name: Reference
      pk: null
      model: serializers.category
    - fields:
        name: Non-fiction
      model: serializers.category"""
    mapping_ordering_str = """- model: serializers.article
  pk: %(article_pk)s
  fields:
    author: %(author_pk)s
    headline: Poker has no place on ESPN
    pub_date: 2006-06-16 11:00:00
    categories: [%(first_category_pk)s, %(second_category_pk)s]
    meta_data: []
"""
    @staticmethod
    def _validate_output(serial_str):
        # Valid output is anything yaml.safe_load can parse.
        try:
            yaml.safe_load(StringIO(serial_str))
        except Exception:
            return False
        else:
            return True
    @staticmethod
    def _get_pk_values(serial_str):
        ret_list = []
        stream = StringIO(serial_str)
        for obj_dict in yaml.safe_load(stream):
            ret_list.append(obj_dict["pk"])
        return ret_list
    @staticmethod
    def _get_field_values(serial_str, field_name):
        ret_list = []
        stream = StringIO(serial_str)
        for obj_dict in yaml.safe_load(stream):
            if "fields" in obj_dict and field_name in obj_dict["fields"]:
                field_value = obj_dict["fields"][field_name]
                # yaml.safe_load will return non-string objects for some
                # of the fields we are interested in, this ensures that
                # everything comes back as a string
                if isinstance(field_value, six.string_types):
                    ret_list.append(field_value)
                else:
                    ret_list.append(str(field_value))
        return ret_list
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
    """Forward-reference serializer tests run with the YAML backend.

    The transaction-based base class does the work; this subclass only
    supplies the backend name and the forward-reference fixture.
    NOTE(review): fixture whitespace appears stripped in this copy —
    verify against upstream before trusting the exact string bytes.
    """

    serializer_name = "yaml"
    # Golden fixture: article serialized before the rows it references.
    fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
|
aduric/crossfit | refs/heads/master | nonrel/django/utils/formats.py | 159 | import decimal
import datetime
from django.conf import settings
from django.utils.translation import get_language, to_locale, check_for_language
from django.utils.importlib import import_module
from django.utils.encoding import smart_str
from django.utils import dateformat, numberformat, datetime_safe
from django.utils.safestring import mark_safe
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
def reset_format_cache():
    """Drop every cached format string and format module.

    Provided primarily for tests that change settings and need the
    localization machinery to forget previously resolved formats.
    """
    global _format_modules_cache, _format_cache
    _format_modules_cache = {}
    _format_cache = {}
def iter_format_modules(lang):
    """
    Does the heavy lifting of finding format modules.

    Yields the ``formats`` submodule of each candidate locale package for
    ``lang``. Project modules named by settings.FORMAT_MODULE_PATH are
    yielded before Django's bundled django.conf.locale packages, and a
    country-specific locale (e.g. 'pt_BR') is tried before its base
    language ('pt'). Yields nothing for unknown languages.
    """
    if check_for_language(lang):
        format_locations = ['django.conf.locale.%s']
        if settings.FORMAT_MODULE_PATH:
            format_locations.append(settings.FORMAT_MODULE_PATH + '.%s')
        # reverse() puts the project-supplied path first so it overrides
        # Django's defaults.
        format_locations.reverse()
        locale = to_locale(lang)
        locales = [locale]
        if '_' in locale:
            locales.append(locale.split('_')[0])
        for location in format_locations:
            for loc in locales:
                try:
                    yield import_module('.formats', location % loc)
                except ImportError:
                    # A locale without a formats module is fine; skip it.
                    pass
def get_format_modules(reverse=False):
    """Return the format modules for the active language, cached per language.

    With ``reverse=True`` a reversed copy of the list is returned.
    """
    current_lang = get_language()
    if current_lang not in _format_modules_cache:
        _format_modules_cache[current_lang] = list(iter_format_modules(current_lang))
    modules = _format_modules_cache[current_lang]
    return list(reversed(modules)) if reverse else modules
def get_format(format_type, lang=None, use_l10n=None):
    """
    For a specific format type, returns the format for the current
    language (locale), defaults to the format in the settings.
    format_type is the name of the format, e.g. 'DATE_FORMAT'
    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    format_type = smart_str(format_type)
    if use_l10n or (use_l10n is None and settings.USE_L10N):
        if lang is None:
            lang = get_language()
        cache_key = (format_type, lang)
        try:
            # A cached None means "no locale module defines this format";
            # fall back to the setting in that case (the ``or``).
            return _format_cache[cache_key] or getattr(settings, format_type)
        except KeyError:
            # Cache miss: scan the format modules in priority order.
            for module in get_format_modules():
                try:
                    val = getattr(module, format_type)
                    _format_cache[cache_key] = val
                    return val
                except AttributeError:
                    pass
            # Negative-cache the miss so the module scan runs only once.
            _format_cache[cache_key] = None
            return getattr(settings, format_type)
    # Localization disabled: always use the project-wide setting.
    return getattr(settings, format_type)
def date_format(value, format=None, use_l10n=None):
    """Format a date/datetime using a localizable format.

    ``format`` names a format setting such as 'DATE_FORMAT' (the default);
    ``use_l10n``, when not None, overrides settings.USE_L10N.
    """
    format_string = get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)
    return dateformat.format(value, format_string)
def time_format(value, format=None, use_l10n=None):
    """Format a datetime.time using a localizable format.

    ``format`` names a format setting such as 'TIME_FORMAT' (the default);
    ``use_l10n``, when not None, overrides settings.USE_L10N.
    """
    format_string = get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)
    return dateformat.time_format(value, format_string)
def number_format(value, decimal_pos=None, use_l10n=None):
    """Format a number with the current locale's separators and grouping.

    ``use_l10n``, when not None, overrides settings.USE_L10N.
    """
    localized = use_l10n or (use_l10n is None and settings.USE_L10N)
    lang = get_language() if localized else None
    return numberformat.format(
        value,
        get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
        decimal_pos,
        get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
        get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
    )
def localize(value, use_l10n=None):
    """
    Checks if value is a localizable type (date, number...) and returns it
    formatted as a string using current locale format.
    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    # bool is tested before the numeric types: bool subclasses int and
    # would otherwise come back as '1'/'0'. (unicode/long are Python 2
    # builtins; this module targets Python 2.)
    if isinstance(value, bool):
        return mark_safe(unicode(value))
    elif isinstance(value, (decimal.Decimal, float, int, long)):
        return number_format(value, use_l10n=use_l10n)
    # datetime must be tested before date: datetime subclasses date.
    elif isinstance(value, datetime.datetime):
        return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
    elif isinstance(value, datetime.date):
        return date_format(value, use_l10n=use_l10n)
    elif isinstance(value, datetime.time):
        return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
    else:
        # Non-localizable values pass through unchanged.
        return value
def localize_input(value, default=None):
    """
    Checks if an input value is a localizable type and returns it
    formatted with the appropriate formatting string of the current locale.

    ``default``, when given, overrides the locale-provided input format
    string. (``long`` is a Python 2 builtin; this module targets Python 2.)
    """
    if isinstance(value, (decimal.Decimal, float, int, long)):
        return number_format(value)
    # datetime is checked before date because datetime subclasses date.
    elif isinstance(value, datetime.datetime):
        # datetime_safe presumably works around strftime range limits —
        # see django.utils.datetime_safe.
        value = datetime_safe.new_datetime(value)
        format = smart_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
        return value.strftime(format)
    elif isinstance(value, datetime.date):
        value = datetime_safe.new_date(value)
        format = smart_str(default or get_format('DATE_INPUT_FORMATS')[0])
        return value.strftime(format)
    elif isinstance(value, datetime.time):
        format = smart_str(default or get_format('TIME_INPUT_FORMATS')[0])
        return value.strftime(format)
    return value
def sanitize_separators(value):
    """
    Sanitizes a value according to the current decimal and
    thousand separator setting. Used with form field input.

    Turns a locale-formatted number string into one the builtin parsers
    accept: the locale decimal separator becomes '.', and (when
    USE_THOUSAND_SEPARATOR is on) grouping separators are stripped.
    Non-strings, and everything when USE_L10N is off, pass through.
    """
    if settings.USE_L10N:
        decimal_separator = get_format('DECIMAL_SEPARATOR')
        # basestring is the Python 2 common string base class.
        if isinstance(value, basestring):
            # parts collects [decimals, integer-part]; it is reversed on
            # join so the result reads '<integer>.<decimals>'.
            parts = []
            if decimal_separator in value:
                value, decimals = value.split(decimal_separator, 1)
                parts.append(decimals)
            if settings.USE_THOUSAND_SEPARATOR:
                # Strip grouping separators from the integer part.
                parts.append(value.replace(get_format('THOUSAND_SEPARATOR'), ''))
            else:
                parts.append(value)
            value = '.'.join(reversed(parts))
    return value
|
sm0svx/asciidoc | refs/heads/master | filters/graphviz/graphviz2png.py | 29 | #!/usr/bin/env python
import os, sys, subprocess
from optparse import *
__AUTHOR__ = "Gouichi Iisaka <iisaka51@gmail.com>"
__VERSION__ = '1.1.4'
class EApp(Exception):
    '''Application specific exception, raised for expected runtime errors
    (bad paths, failed external commands) rather than programming bugs.'''
    pass
class Application():
    '''
    NAME
        graphviz2png - Converts textual graphviz notation to PNG file
    SYNOPSIS
        graphviz2png [options] INFILE
    DESCRIPTION
        This filter reads Graphviz notation text from the input file
        INFILE (or stdin if INFILE is -), converts it to a PNG image file.
    OPTIONS
        -o OUTFILE, --outfile=OUTFILE
            The file name of the output file. If not specified the output
            file is named like INFILE but with a .png file name extension.
        -L LAYOUT, --layout=LAYOUT
            Graphviz layout: dot, neato, twopi, circo, fdp
            Default is 'dot'.
        -F FORMAT, --format=FORMAT
            Graphviz output format: png, svg, or any other format Graphviz
            supports. Run dot -T? to get the full list.
            Default is 'png'.
        -v, --verbose
            Verbosely print processing information to stderr.
        -h, --help
            Print this documentation.
        -V, --version
            Print program version number.
    SEE ALSO
        graphviz(1)
    AUTHOR
        Written by Gouichi Iisaka, <iisaka51@gmail.com>
        Format support added by Elmo Todurov, <todurov@gmail.com>
    THANKS
        Stuart Rackham, <srackham@gmail.com>
        This script was inspired by his music2png.py and AsciiDoc
    LICENSE
        Copyright (C) 2008-2009 Gouichi Iisaka.
        Free use of this software is granted under the terms of
        the GNU General Public License (GPL).
    '''

    def __init__(self, argv=None):
        """Discover supported output formats and parse the command line.

        Exits with usage help when not exactly one INFILE argument is given.
        """
        # Run dot, get the list of supported formats. It's prefixed by some junk.
        format_output = subprocess.Popen(["dot", "-T?"], stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[1]
        # The junk contains : and ends with :. So we split it, then strip the
        # final endline, then split the list for future usage.
        supported_formats = format_output.split(": ")[2][:-1].split(" ")
        if not argv:
            argv = sys.argv
        self.usage = '%prog [options] inputfile'
        self.version = 'Version: %s\n' % __VERSION__
        self.version += 'Copyright(c) 2008-2009: %s\n' % __AUTHOR__
        self.option_list = [
            Option("-o", "--outfile", action="store",
                   dest="outfile",
                   help="Output file"),
            Option("-L", "--layout", action="store",
                   dest="layout", default="dot", type="choice",
                   choices=['dot', 'neato', 'twopi', 'circo', 'fdp'],
                   help="Layout type. LAYOUT=<dot|neato|twopi|circo|fdp>"),
            Option("-F", "--format", action="store",
                   dest="format", default="png", type="choice",
                   choices=supported_formats,
                   help="Format type. FORMAT=<" + "|".join(supported_formats) + ">"),
            Option("--debug", action="store_true",
                   dest="do_debug",
                   help=SUPPRESS_HELP),
            Option("-v", "--verbose", action="store_true",
                   dest="do_verbose", default=False,
                   help="verbose output"),
        ]
        self.parser = OptionParser(usage=self.usage, version=self.version,
                                   option_list=self.option_list)
        (self.options, self.args) = self.parser.parse_args()
        if len(self.args) != 1:
            self.parser.print_help()
            sys.exit(1)
        self.options.infile = self.args[0]

    def systemcmd(self, cmd):
        """Run a shell command; echo it in verbose mode, raise EApp on failure."""
        if self.options.do_verbose:
            msg = 'Execute: %s' % cmd
            sys.stderr.write(msg + os.linesep)
        else:
            # Silence the command's stderr when not verbose.
            cmd += ' 2>%s' % os.devnull
        if os.system(cmd):
            # Call-style raise works on both Python 2 and 3 (the original
            # 'raise EApp, msg' form is Python 2 only).
            raise EApp('failed command: %s' % cmd)

    def graphviz2png(self, infile, outfile):
        '''Convert Graphviz notation in file infile to
        PNG file named outfile.'''
        outfile = os.path.abspath(outfile)
        outdir = os.path.dirname(outfile)
        if not os.path.isdir(outdir):
            raise EApp('directory does not exist: %s' % outdir)
        basefile = os.path.splitext(outfile)[0]
        # Run the layout tool from the output directory so any relative
        # resources land next to the output file.
        saved_cwd = os.getcwd()
        os.chdir(outdir)
        try:
            cmd = '%s -T%s "%s" > "%s"' % (
                self.options.layout, self.options.format, infile, outfile)
            self.systemcmd(cmd)
        finally:
            os.chdir(saved_cwd)
        if not self.options.do_debug:
            # Remove the temporary source file unless debugging.
            os.unlink(infile)

    def run(self):
        """Resolve input/output file names and perform the conversion."""
        if self.options.format == '':
            self.options.format = 'png'
        if self.options.infile == '-':
            # Reading from stdin: OUTFILE is mandatory so we know where to
            # spool the intermediate .txt source.
            if self.options.outfile is None:
                sys.stderr.write('OUTFILE must be specified')
                sys.exit(1)
            infile = os.path.splitext(self.options.outfile)[0] + '.txt'
            lines = sys.stdin.readlines()
            open(infile, 'w').writelines(lines)
        else:
            # BUGFIX: 'infile' was never bound on the file-input path,
            # producing a NameError below for any real INFILE argument.
            infile = self.options.infile
        if not os.path.isfile(infile):
            raise EApp('input file does not exist: %s' % infile)
        if self.options.outfile is None:
            outfile = os.path.splitext(infile)[0] + '.png'
        else:
            outfile = self.options.outfile
        self.graphviz2png(infile, outfile)
        # To suppress asciidoc 'no output from filter' warnings.
        if self.options.infile == '-':
            sys.stdout.write(' ')
# Script entry point: parse arguments and run the conversion.
if __name__ == "__main__":
    app = Application()
    app.run()
|
vicky2135/lucious | refs/heads/master | oscar/lib/python2.7/site-packages/IPython/utils/tz.py | 42 | # encoding: utf-8
"""
Timezone utilities
Just UTC-awareness right now
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from datetime import tzinfo, timedelta, datetime
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Shared zero-length offset used for every UTC computation below.
ZERO = timedelta(0)


class tzUTC(tzinfo):
    """Minimal tzinfo implementation for UTC: zero offset, no DST."""

    def utcoffset(self, d):
        """UTC is, by definition, at zero offset from UTC."""
        return ZERO

    def dst(self, d):
        """UTC never observes daylight saving time."""
        return ZERO


# Singleton instance shared by all helpers in this module.
UTC = tzUTC()


def utc_aware(unaware):
    """Wrap a naive-UTC datetime factory so its results carry UTC tzinfo."""
    def wrapper(*args, **kwargs):
        naive = unaware(*args, **kwargs)
        return naive.replace(tzinfo=UTC)
    return wrapper


utcfromtimestamp = utc_aware(datetime.utcfromtimestamp)
utcnow = utc_aware(datetime.utcnow)
|
esmoyon1/GeonodeV1 | refs/heads/Branch1 | geonode/base/migrations/26_to_27.py | 3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema changes taking the `base` app from 0026 to 0027.

    Alters ResourceBase.tkeywords (help text / blank allowed) and adds
    the nullable `group` foreign key and `alternate` char field.
    """

    dependencies = [
        ('base', '24_to_26'),
    ]

    operations = [
        migrations.AlterField(
            model_name='resourcebase',
            name='tkeywords',
            # NOTE(review): help_text has an unclosed parenthesis; it is a
            # user-visible runtime string, so it is left untouched here.
            field=models.ManyToManyField(help_text='formalised word(s) or phrase(s) from a fixed thesaurus used to describe the subject (space or comma-separated', to='base.ThesaurusKeyword', blank=True),
        ),
        migrations.AddField(
            model_name='resourcebase',
            name='group',
            field=models.ForeignKey(blank=True, to='auth.Group', null=True),
        ),
        migrations.AddField(
            model_name='resourcebase',
            name='alternate',
            field=models.CharField(max_length=128, null=True, blank=True),
        ),
    ]
|
jjlee3/openthread | refs/heads/master | tools/harness-automation/cases/leader_5_6_5.py | 16 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Leader_5_6_5(HarnessCase):
    """Thread harness certification case 5.6.5 with the DUT as Leader."""

    role = HarnessCase.ROLE_LEADER
    # Harness-formatted case identifier ("5 6 5" == test case 5.6.5).
    case = '5 6 5'
    # Number of golden (reference) devices the test bed must provide.
    golden_devices_required = 3

    def on_dialog(self, dialog, title):
        """No harness dialogs require special handling for this case."""
        pass


if __name__ == '__main__':
    unittest.main()
|
damirda/ansible-modules-core | refs/heads/devel | cloud/azure/azure_rm_securitygroup.py | 46 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: azure_rm_securitygroup
version_added: "2.1"
short_description: Manage Azure network security groups.
description:
- Create, update or delete a network security group. A security group contains Access Control List (ACL) rules
that allow or deny network traffic to subnets or individual network interfaces. A security group is created
with a set of default security rules and an empty set of security rules. Shape traffic flow by adding
rules to the empty set of security rules.
options:
default_rules:
description:
- The set of default rules automatically added to a security group at creation. In general default
rules will not be modified. Modify rules to shape the flow of traffic to or from a subnet or NIC. See
rules below for the makeup of a rule dict.
required: false
default: null
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
name:
description:
- Name of the security group to operate on.
required: false
default: null
purge_default_rules:
description:
- Remove any existing rules not matching those defined in the default_rules parameter.
default: false
required: false
purge_rules:
description:
- Remove any existing rules not matching those defined in the rules parameters.
default: false
required: false
resource_group:
description:
- Name of the resource group the security group belongs to.
required: true
rules:
description:
- Set of rules shaping traffic flow to or from a subnet or NIC. Each rule is a dictionary.
type: complex
required: false
default: null
contains:
name:
description: Unique name for the rule.
required: true
description:
description: Short description of the rule's purpose.
protocol:
description: Accepted traffic protocol.
choices:
- Udp
- Tcp
- "*"
default: "*"
source_port_range:
description: Port or range of ports from which traffic originates.
default: "*"
destination_port_range:
description: Port or range of ports to which traffic is headed.
default: "*"
source_address_prefix:
description: IP address or CIDR from which traffic originates.
default: "*"
destination_address_prefix:
description: IP address or CIDR to which traffic is headed.
default: "*"
access:
description: Whether or not to allow the traffic flow.
choices:
- Allow
- Deny
default: Allow
priority:
description: Order in which to apply the rule. Must be a unique integer between 100 and 4096 inclusive.
type: int
required: true
direction:
description: Indicates the direction of the traffic flow.
choices:
- Inbound
- Outbound
default: Inbound
state:
description:
- Assert the state of the security group. Set to 'present' to create or update a security group. Set to
'absent' to remove a security group.
default: present
required: false
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
# Create a security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
purge_rules: yes
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22
access: Deny
priority: 100
direction: Inbound
- name: 'AllowSSH'
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22
access: Allow
priority: 101
direction: Inbound
# Update rules on existing security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22-23
access: Deny
priority: 100
direction: Inbound
- name: AllowSSHFromHome
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22-23
access: Allow
priority: 102
direction: Inbound
tags:
testing: testing
delete: on-exit
# Delete security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
state: absent
'''
RETURN = '''
state:
description: Current state of the security group.
returned: always
type: dict
sample: {
"default_rules": [
{
"access": "Allow",
"description": "Allow inbound traffic from all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetInBound",
"name": "AllowVnetInBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow inbound traffic from azure load balancer",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowAzureLoadBalancerInBound",
"name": "AllowAzureLoadBalancerInBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "AzureLoadBalancer",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all inbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllInBound",
"name": "DenyAllInBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetOutBound",
"name": "AllowVnetOutBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to Internet",
"destination_address_prefix": "Internet",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowInternetOutBound",
"name": "AllowInternetOutBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all outbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllOutBound",
"name": "DenyAllOutBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
}
],
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup",
"location": "westus",
"name": "mysecgroup",
"network_interfaces": [],
"rules": [
{
"access": "Deny",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/DenySSH",
"name": "DenySSH",
"priority": 100,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/AllowSSH",
"name": "AllowSSH",
"priority": 101,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "174.109.158.0/24",
"source_port_range": "*"
}
],
"subnets": [],
"tags": {
"delete": "on-exit",
"foo": "bar",
"testing": "testing"
},
"type": "Microsoft.Network/networkSecurityGroups"
}
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureHttpError
from azure.mgmt.network.models import NetworkSecurityGroup, SecurityRule
from azure.mgmt.network.models.network_management_client_enums import (SecurityRuleAccess,
SecurityRuleDirection,
SecurityRuleProtocol)
except ImportError:
# This is handled in azure_rm_common
pass
def validate_rule(rule, rule_type=None):
    '''
    Apply defaults to a rule dictionary and check that all values are valid.

    Mutates ``rule`` in place: fills in 'Allow' access, '*' protocol,
    'Inbound' direction, and '*' port/address wildcards when absent.

    :param rule: rule dict
    :param rule_type: Set to 'default' if the rule is part of the default set
        of rules; default rules are exempt from the 100-4096 priority range.
    :return: None (raises Exception describing the first invalid value)
    '''
    if not rule.get('name'):
        raise Exception("Rule name value is required.")
    priority = rule.get('priority', None)
    if not priority:
        raise Exception("Rule priority is required.")
    # NOTE: ``long`` is Python 2 only; this module predates Python 3 support.
    if not isinstance(priority, (int, long)):
        raise Exception("Rule priority attribute must be an integer.")
    if rule_type != 'default' and (priority < 100 or priority > 4096):
        raise Exception("Rule priority must be between 100 and 4096")
    if not rule.get('access'):
        rule['access'] = 'Allow'
    # Validate against the Azure SDK enums so typos fail before the API call.
    access_names = [member.value for member in SecurityRuleAccess]
    if rule['access'] not in access_names:
        raise Exception("Rule access must be one of [{0}]".format(', '.join(access_names)))
    if not rule.get('destination_address_prefix'):
        rule['destination_address_prefix'] = '*'
    if not rule.get('source_address_prefix'):
        rule['source_address_prefix'] = '*'
    if not rule.get('protocol'):
        rule['protocol'] = '*'
    protocol_names = [member.value for member in SecurityRuleProtocol]
    if rule['protocol'] not in protocol_names:
        raise Exception("Rule protocol must be one of [{0}]".format(', '.join(protocol_names)))
    if not rule.get('direction'):
        rule['direction'] = 'Inbound'
    direction_names = [member.value for member in SecurityRuleDirection]
    if rule['direction'] not in direction_names:
        raise Exception("Rule direction must be one of [{0}]".format(', '.join(direction_names)))
    if not rule.get('source_port_range'):
        rule['source_port_range'] = '*'
    if not rule.get('destination_port_range'):
        rule['destination_port_range'] = '*'
def compare_rules(r, rule):
    """Compare an existing rule dict ``r`` against a requested rule ``rule``.

    When the names match, ``r`` is updated in place with any attribute
    values that differ.

    :param r: existing rule dict (shape of create_rule_dict_from_obj output)
    :param rule: requested rule dict (defaults applied by validate_rule)
    :return: tuple (matched, changed) -- matched is True when the names are
        equal; changed is True when any attribute of ``r`` was updated.
    """
    matched = False
    changed = False
    if r['name'] == rule['name']:
        matched = True
        if rule.get('description', None) != r['description']:
            changed = True
            # BUGFIX: use .get() here too -- indexing raised KeyError when
            # the requested rule omitted 'description'.
            r['description'] = rule.get('description', None)
        if rule['protocol'] != r['protocol']:
            changed = True
            r['protocol'] = rule['protocol']
        if rule['source_port_range'] != r['source_port_range']:
            changed = True
            r['source_port_range'] = rule['source_port_range']
        if rule['destination_port_range'] != r['destination_port_range']:
            changed = True
            r['destination_port_range'] = rule['destination_port_range']
        # BUGFIX: address prefixes were previously never compared, so a rule
        # whose source/destination CIDR changed was silently left as-is.
        if rule['source_address_prefix'] != r['source_address_prefix']:
            changed = True
            r['source_address_prefix'] = rule['source_address_prefix']
        if rule['destination_address_prefix'] != r['destination_address_prefix']:
            changed = True
            r['destination_address_prefix'] = rule['destination_address_prefix']
        if rule['access'] != r['access']:
            changed = True
            r['access'] = rule['access']
        if rule['priority'] != r['priority']:
            changed = True
            r['priority'] = rule['priority']
        if rule['direction'] != r['direction']:
            changed = True
            r['direction'] = rule['direction']
    return matched, changed
def create_rule_instance(rule):
    '''
    Create an instance of SecurityRule from a dict.

    The first five positional arguments are the values the SDK model
    requires; the keyword arguments are optional and default to None
    when missing from the dict.

    :param rule: dict describing the rule (defaults applied by validate_rule)
    :return: SecurityRule
    '''
    return SecurityRule(
        rule['protocol'],
        rule['source_address_prefix'],
        rule['destination_address_prefix'],
        rule['access'],
        rule['direction'],
        id=rule.get('id', None),
        description=rule.get('description', None),
        source_port_range=rule.get('source_port_range', None),
        destination_port_range=rule.get('destination_port_range', None),
        priority=rule.get('priority', None),
        provisioning_state=rule.get('provisioning_state', None),
        name=rule.get('name', None),
        etag=rule.get('etag', None)
    )
def create_rule_dict_from_obj(rule):
    '''
    Create a dict from an instance of a SecurityRule.

    Copies each SDK model attribute into a plain dict suitable for the
    Ansible result payload and for compare_rules().

    :param rule: SecurityRule
    :return: dict
    '''
    attributes = (
        'id', 'name', 'description', 'protocol',
        'source_port_range', 'destination_port_range',
        'source_address_prefix', 'destination_address_prefix',
        'access', 'priority', 'direction',
        'provisioning_state', 'etag',
    )
    return {attr: getattr(rule, attr) for attr in attributes}
def create_network_security_group_dict(nsg):
    '''Flatten a NetworkSecurityGroup SDK object into a plain dict.

    Security rules and default rules are expanded via
    create_rule_dict_from_obj; network interfaces and subnets are reduced
    to their resource ids. Missing collections become empty lists.
    '''
    return dict(
        id=nsg.id,
        name=nsg.name,
        type=nsg.type,
        location=nsg.location,
        tags=nsg.tags,
        rules=[create_rule_dict_from_obj(rule)
               for rule in (nsg.security_rules or [])],
        default_rules=[create_rule_dict_from_obj(rule)
                       for rule in (nsg.default_security_rules or [])],
        network_interfaces=[interface.id
                            for interface in (nsg.network_interfaces or [])],
        subnets=[subnet.id for subnet in (nsg.subnets or [])],
    )
class AzureRMSecurityGroup(AzureRMModuleBase):
def __init__(self):
    """Declare the module's argument spec and result skeleton, then hand
    control to AzureRMModuleBase, which parses parameters (populating the
    attributes below) and invokes exec_module().
    """
    self.module_arg_spec = dict(
        default_rules=dict(type='list'),
        location=dict(type='str'),
        name=dict(type='str', required=True),
        purge_default_rules=dict(type='bool', default=False),
        purge_rules=dict(type='bool', default=False),
        resource_group=dict(required=True, type='str'),
        rules=dict(type='list'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )
    # Placeholders; the base class assigns the parsed parameter values
    # onto these attributes before exec_module() runs.
    self.default_rules = None
    self.location = None
    self.name = None
    self.purge_default_rules = None
    self.purge_rules = None
    self.resource_group = None
    self.rules = None
    self.state = None
    self.tags = None
    # Standard Ansible result payload returned by exec_module().
    self.results = dict(
        changed=False,
        state=dict()
    )
    super(AzureRMSecurityGroup, self).__init__(self.module_arg_spec,
                                               supports_check_mode=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
changed = False
results = dict()
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.rules:
for rule in self.rules:
try:
validate_rule(rule)
except Exception as exc:
self.fail("Error validating rule {0} - {1}".format(rule, str(exc)))
if self.default_rules:
for rule in self.default_rules:
try:
validate_rule(rule, 'default')
except Exception as exc:
self.fail("Error validating default rule {0} - {1}".format(rule, str(exc)))
try:
nsg = self.network_client.network_security_groups.get(self.resource_group, self.name)
results = create_network_security_group_dict(nsg)
self.log("Found security group:")
self.log(results, pretty_print=True)
self.check_provisioning_state(nsg, self.state)
if self.state == 'present':
pass
elif self.state == 'absent':
self.log("CHANGED: security group found but state is 'absent'")
changed = True
except CloudError:
if self.state == 'present':
self.log("CHANGED: security group not found and state is 'present'")
changed = True
if self.state == 'present' and not changed:
# update the security group
self.log("Update security group {0}".format(self.name))
if self.rules:
for rule in self.rules:
rule_matched = False
for r in results['rules']:
match, changed = compare_rules(r, rule)
if changed:
changed = True
if match:
rule_matched = True
if not rule_matched:
changed = True
results['rules'].append(rule)
if self.purge_rules:
new_rules = []
for rule in results['rules']:
for r in self.rules:
if rule['name'] == r['name']:
new_rules.append(rule)
results['rules'] = new_rules
if self.default_rules:
for rule in self.default_rules:
rule_matched = False
for r in results['default_rules']:
match, changed = compare_rules(r, rule)
if changed:
changed = True
if match:
rule_matched = True
if not rule_matched:
changed = True
results['default_rules'].append(rule)
if self.purge_default_rules:
new_default_rules = []
for rule in results['default_rules']:
for r in self.default_rules:
if rule['name'] == r['name']:
new_default_rules.append(rule)
results['default_rules'] = new_default_rules
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
self.results['changed'] = changed
self.results['state'] = results
if not self.check_mode:
self.results['state'] = self.create_or_update(results)
elif self.state == 'present' and changed:
# create the security group
self.log("Create security group {0}".format(self.name))
if not self.location:
self.fail("Parameter error: location required when creating a security group.")
results['name'] = self.name
results['location'] = self.location
results['rules'] = []
results['default_rules'] = []
results['tags'] = {}
if self.rules:
results['rules'] = self.rules
if self.default_rules:
results['default_rules'] = self.default_rules
if self.tags:
results['tags'] = self.tags
self.results['changed'] = changed
self.results['state'] = results
if not self.check_mode:
self.results['state'] = self.create_or_update(results)
elif self.state == 'absent' and changed:
self.log("Delete security group {0}".format(self.name))
self.results['changed'] = changed
self.results['state'] = dict()
if not self.check_mode:
self.delete()
# the delete does not actually return anything. if no exception, then we'll assume
# it worked.
self.results['state']['status'] = 'Deleted'
return self.results
def create_or_update(self, results):
parameters = NetworkSecurityGroup()
if results.get('rules'):
parameters.security_rules = []
for rule in results.get('rules'):
parameters.security_rules.append(create_rule_instance(rule))
if results.get('default_rules'):
parameters.default_security_rules = []
for rule in results.get('default_rules'):
parameters.default_security_rules.append(create_rule_instance(rule))
parameters.tags = results.get('tags')
parameters.location = results.get('location')
try:
poller = self.network_client.network_security_groups.create_or_update(self.resource_group,
self.name,
parameters)
result = self.get_poller_result(poller)
except AzureHttpError as exc:
self.fail("Error creating/upating security group {0} - {1}".format(self.name, str(exc)))
return create_network_security_group_dict(result)
def delete(self):
try:
poller = self.network_client.network_security_groups.delete(self.resource_group, self.name)
result = self.get_poller_result(poller)
except AzureHttpError as exc:
raise Exception("Error deleting security group {0} - {1}".format(self.name, str(exc)))
return result
def main():
    """Module entry point: constructing the class runs the whole module."""
    AzureRMSecurityGroup()
# Standard Ansible module invocation guard.
if __name__ == '__main__':
    main()
|
edxnercel/edx-platform | refs/heads/master | lms/djangoapps/instructor_analytics/tests/__init__.py | 12133432 | |
tjlaboss/openmc | refs/heads/develop | tests/regression_tests/energy_grid/__init__.py | 12133432 | |
lipis/hurry-app | refs/heads/master | main/lib/wtforms/ext/__init__.py | 12133432 | |
mjirayu/sit_academy | refs/heads/master | lms/djangoapps/ccx/plugins.py | 110 | """
Registers the CCX feature for the edX platform.
"""
from django.conf import settings
from django.utils.translation import ugettext_noop
from xmodule.tabs import CourseTab
from student.roles import CourseCcxCoachRole
class CcxCourseTab(CourseTab):
    """
    The representation of the CCX course tab
    """
    type = "ccx_coach"
    title = ugettext_noop("CCX Coach")
    view_name = "ccx_coach_dashboard"
    is_dynamic = True  # The CCX view is dynamically added to the set of tabs when it is enabled

    @classmethod
    def is_enabled(cls, course, user=None):
        """
        Returns true if CCX has been enabled and the specified user is a coach
        """
        # Without a user to check against, the tab is considered enabled.
        if not user:
            return True
        feature_on = settings.FEATURES.get('CUSTOM_COURSES_EDX', False)
        if not (feature_on and course.enable_ccx):
            return False
        # Only CCX coaches for this course get the tab.
        return CourseCcxCoachRole(course.id).has_user(user)
|
jamesblunt/sublime-evernote | refs/heads/master | lib/pygments/styles/vim.py | 135 | # -*- coding: utf-8 -*-
"""
pygments.styles.vim
~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by vim.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Token
class VimStyle(Style):
    """
    Styles somewhat like vim 7.0
    """
    # Dark scheme: near-black background with light grey default text.
    background_color = "#000000"
    highlight_color = "#222222"
    default_style = "#cccccc"
    # Token -> style string; an empty string means "inherit the default".
    styles = {
        Token: "#cccccc",
        Whitespace: "",
        Comment: "#000080",
        Comment.Preproc: "",
        Comment.Special: "bold #cd0000",
        Keyword: "#cdcd00",
        Keyword.Declaration: "#00cd00",
        Keyword.Namespace: "#cd00cd",
        Keyword.Pseudo: "",
        Keyword.Type: "#00cd00",
        Operator: "#3399cc",
        Operator.Word: "#cdcd00",
        Name: "",
        Name.Class: "#00cdcd",
        Name.Builtin: "#cd00cd",
        Name.Exception: "bold #666699",
        Name.Variable: "#00cdcd",
        String: "#cd0000",
        Number: "#cd00cd",
        Generic.Heading: "bold #000080",
        Generic.Subheading: "bold #800080",
        Generic.Deleted: "#cd0000",
        Generic.Inserted: "#00cd00",
        Generic.Error: "#FF0000",
        Generic.Emph: "italic",
        Generic.Strong: "bold",
        Generic.Prompt: "bold #000080",
        Generic.Output: "#888",
        Generic.Traceback: "#04D",
        Error: "border:#FF0000"
    }
|
marshallmcdonnell/interactive_plotting | refs/heads/master | TraitsUI/matplotlib/demo_group_size_matplotlib.py | 1 | """
Control the height and width of a Group
TraitsUI does not permit explicit specification of the height or width of a
Group (or its descendants). The workaround is to put each Group whose size you
wish to control into its own View class, which can be an Item (hence can be
size-controlled) in the larger View. Sometimes it is necessary to repeat such
surgery at several levels to get the desired layout.
We separate the left and right groups by a splitter (HSplit), and also
make the window itself resizable.
This demo has the Chaco stuff "ripped out" and replaced by Matplotlib
"""
#--------------------------------------------------------------------------#
# TraitsUI imports
from numpy import linspace, pi, sin
from traits.api import HasTraits, Instance, Str, Float, Int, Array, on_trait_change
# UItem is Unlabeled Item
from traitsui.api import View, Item, UItem, HSplit, InstanceEditor, \
VGroup, HGroup
#--------------------------------------------------------------------------#
# Matplotlib Imports
import matplotlib
# We want matplotlib to use a QT backend
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from traits.api import Any, Instance
from traitsui.qt4.editor import Editor
from traitsui.qt4.basic_editor_factory import BasicEditorFactory
#--------------------------------------------------------------------------#
# Matplotlib Additions
class _MPLFigureEditor(Editor):
    """TraitsUI editor that embeds a matplotlib canvas for a Figure trait."""
    scrollable = True

    def init(self, parent):
        """Create the Qt widget and attach the trait tooltip to it."""
        self.control = self._create_canvas(parent)
        self.set_tooltip()

    def update_editor(self):
        """No incremental updates; the canvas redraws itself on demand."""
        pass

    def _create_canvas(self, parent):
        """Wrap the edited Figure value (self.value) in a Qt canvas widget."""
        return FigureCanvas(self.value)
class MPLFigureEditor(BasicEditorFactory):
    # Editor factory hook: tells TraitsUI which Editor class to instantiate.
    klass = _MPLFigureEditor
#--------------------------------------------------------------------------#
# Main TraitsUI section
class PlotView(HasTraits):
    """Defines a sub-view whose size we wish to explicitly control."""
    # Multiplier applied to ydata before drawing.
    scale = Float(1.0)
    # Data arrays; plot() refreshes whenever either one changes.
    xdata = Array
    ydata = Array
    # The matplotlib Figure rendered via MPLFigureEditor.
    figure = Instance(Figure, ())
    view = View(
        # This HGroup keeps 'n' from over-widening, by implicitly placing
        # a spring to the right of the item.
        HGroup(Item('scale')),
        UItem('figure', editor=MPLFigureEditor()),
        resizable=True,
    )
    @on_trait_change('xdata,ydata,scale')
    def plot(self):
        """Redraw the line from (xdata, scale*ydata) on any input change."""
        x = self.xdata
        y = self.ydata
        # Trait notifications fire per assignment, so x and y can briefly
        # disagree in length; skip drawing until both are updated.
        if len(x) != len(y): return
        print self.figure  # NOTE(review): looks like leftover debug output
        print self.figure.axes  # NOTE(review): looks like leftover debug output
        if len(self.figure.axes) == 0:
            axes = self.figure.add_subplot(111)
        axes = self.figure.axes[0]
        if not axes.lines:
            axes.plot(x,self.scale*y)
        else:
            # Reuse the existing line object rather than re-plotting.
            l = axes.lines[0]
            l.set_xdata(x)
            l.set_ydata(self.scale*y)
        # canvas is None until the editor has realized the figure in Qt.
        canvas = self.figure.canvas
        if canvas is not None:
            canvas.draw()
class VerticalBar(HasTraits):
    """Defines a sub-view whose size we wish to explicitly control."""
    # Text entry; BigView echoes it to stdout when edited.
    string = Str('abcdefg')
    # Offset; BigView adds it to the plot's y-data when edited.
    shift = Float('0.0')
    view = View(
        VGroup(
            Item('string'),
            Item('shift'),
            show_border=True,
        ),
    )
class BigView(HasTraits):
    """Defines the top-level view. It contains two side-by-side panels (a
    vertical bar and a plot under an integer) whose relative sizes we wish
    to control explicitly. If these were simply defined as Groups, we could
    not control their sizes. But by extracting them as separate classes with
    their own views, the resizing becomes possible, because they are loaded
    as Items now.
    """
    bar = Instance(VerticalBar, ())
    plot = Instance(PlotView)
    view = View(
        HSplit(
            # Specify pixel width of each sub-view. (Or, to specify
            # proportionate width, use a width < 1.0)
            # We specify the height here for sake of demonstration;
            # it could also be specified for the top-level view.
            UItem('bar', width=150,style='custom',editor=InstanceEditor() ),
            UItem('plot', width=500, height=500,style='custom',editor=InstanceEditor() ),
            show_border=True,
        ),
        resizable=True,
    )
    @on_trait_change('bar.shift')
    def shift_plot(self):
        # Extended-name listener: fires when the embedded bar's 'shift'
        # trait is edited; offsets the plot data by that amount.
        self.plot.ydata = self.plot.ydata + self.bar.shift
    @on_trait_change('bar.string')
    def print_string(self):
        # Echo the bar's text entry to stdout (cross-object listening demo).
        print self.bar.string
# Demo driver: build sample sine data, wire the plot into the composite
# view, and open the (blocking) TraitsUI dialog.
x = linspace(-2 * pi, 2 * pi, 100)
pv = PlotView(xdata=x, ydata=sin(x))
bv = BigView(plot=pv)
bv.configure_traits()
|
benjschiller/seriesoftubes | refs/heads/master | scripts/subtract_bam.py | 1 | #!/usr/bin/env python
'''
subtracts BAM (or SAM) files based on sequence names
(for example, use to remove reads that also map rRNA)
outputs to alignments_filtered.BAM
by default, looks in alignments.BAM for .bam files
and expects corresponding files in mapped/alignments.BAM
ignores the species folder name i.e. mapped/alignments.BAM(/foo)
e.g
alignments.BAM/crypto_h99_grubii/random/crypto_small_RNA.dcr.2010.s_5.no_linker.bam
mapped/alignments.BAM/crypto_h99_grubii_rRNA/random/crypto_small_RNA.dcr.2010.s_5.no_linker.bam
'''
import glob
import os
import scripter
from scripter import InvalidFileException
import pysam
from pkg_resources import get_distribution
__version__ = get_distribution('seriesoftubes').version
VERSION = __version__
def main():
    """Build the scripter environment, register CLI options, and run the
    BAM-subtraction action over the input files."""
    env = scripter.Environment(version=VERSION, doc=__doc__)
    arg_parser = env.argument_parser
    arg_parser.add_argument('--sam-out', action='store_true',
                            help='Output SAM files (not BAM)')
    arg_parser.add_argument('--remove-all', action='store_true',
                            help='Remove all reads in mapped BAM/SAM file, not just mapped ones')
    arg_parser.set_defaults(target='filtered')
    env.set_filename_parser(SubtractBamFilenameParser)
    env.do_action(remove_reads)
def is_mapped(read):
    '''
    true if a pysam.AlignedRead is properly mapped, otherwise false
    (checks if read is paired end; if so, returns if the pair is mapped)
    '''
    return read.is_proper_pair if read.is_paired else not read.is_unmapped
def remove_reads(parsed_filename, remove_all=False, sam_out=False,
                 debug=False, **kwargs):
    '''
    note: you must be looking at a sorted file, or this won't work
    '''
    write_opts = 'w' if sam_out else 'wb'
    # First pass: collect the query names present in the mapped file.
    with pysam.Samfile(parsed_filename.mapped_file) as mapped:
        if remove_all:
            reads_to_remove = set(read.qname for read in mapped)
        else:
            reads_to_remove = set(read.qname for read in mapped
                                  if is_mapped(read))
    if debug:
        scripter.debug('Found {!s} reads in {!s}'.format(len(reads_to_remove),
                                                         parsed_filename.mapped_file))
    # Second pass: copy through every read whose name was not seen above.
    with pysam.Samfile(parsed_filename.input_file) as bam_file:
        with pysam.Samfile(parsed_filename.output_file, write_opts,
                           template=bam_file) as out_bam_file:
            for read in bam_file:
                if read.qname not in reads_to_remove:
                    out_bam_file.write(read)
    return
class SubtractBamFilenameParser(scripter.FilenameParser):
    """FilenameParser that validates the .bam/.sam extension, locates the
    matching file under mapped/, and derives the output filename."""

    def __init__(self, filename, sam_out=False, *args, **kwargs):
        super(SubtractBamFilenameParser, self).__init__(filename,
                                                        sam_out=sam_out,
                                                        *args,
                                                        **kwargs)
        # BUG FIX: rstrip(os.extsep) stripped '.' from the *right* of
        # '.bam', leaving the leading dot intact so the check below could
        # never match; lstrip removes the leading separator as intended.
        fext = os.path.splitext(filename)[1].lstrip(os.extsep)
        if not (fext == 'sam' or fext == 'bam'): raise InvalidFileException
        if not self.is_dummy_file:
            # check for the mapped_file
            input_dir_parts = self.input_dir.split(os.sep)
            glob_path = ['mapped', input_dir_parts[0], '*'] + \
                        input_dir_parts[2:] + \
                        [os.path.basename(self.input_file)]
            potential_filenames = glob.glob(os.sep.join(glob_path))
            # BUG FIX: compare counts with ==; `is` on ints only worked by
            # CPython's small-integer interning accident.
            if len(potential_filenames) == 1:
                self.mapped_file = potential_filenames[0]
            elif len(potential_filenames) == 0:
                raise scripter.Usage('Could not find mapped file')
            else:
                raise scripter.Usage('Ambiguous mapped file', *potential_filenames)
            scripter.debug('Mapped file will be', self.mapped_file)

            # Output extension mirrors the --sam-out flag.
            ext = 'sam' if sam_out else 'bam'
            self.output_file = os.sep.join([self.output_dir,
                                            self.with_extension(ext)])
            scripter.debug('Output file will be', self.output_file)
# Command-line entry point (the module is also importable as a library).
if __name__=="__main__": main()
|
fedesanchez/suite-3.1 | refs/heads/master | geoexplorer/app/static/externals/proj4js/tools/pjjs.py | 250 | #!/usr/bin/env python
#
# TODO explain
#
# -- Copyright 2007 IGN France / Geoportail project --
#
import sys
import os
import re
SUFFIX_JAVASCRIPT = ".js"
def _pjcat2js_remove(rezDirectory, catName, targetDirectory):
    """Delete the generated <catName><code>.js files listed in one proj
    catalog file.

    :param rezDirectory: directory holding the proj catalog files
    :param catName: catalog file name (e.g. 'EPSG'), also the prefix of
        the generated .js files
    :param targetDirectory: directory the .js files were generated into
    """
    pjCatFilename = os.path.join(rezDirectory, catName)
    comment_re = re.compile("^#")
    srsdef_re = re.compile("^<([^>]*)>.* <>$")
    # 'with' guarantees the catalog handle is closed even when a line
    # fails to parse (the original leaked the handle on exceptions).
    with open(pjCatFilename, 'r') as pjCat:
        for line in pjCat:
            if comment_re.search(line) is None:
                srsdef_mo = srsdef_re.match(line)
                srsdef_fn = os.path.join(targetDirectory,
                                         catName + srsdef_mo.group(1) + ".js")
                if os.path.exists(srsdef_fn):
                    os.remove(srsdef_fn)
def _pjcat2js_make(rezDirectory, catName, targetDirectory):
    """Generate one Proj4js definition file per SRS entry of a proj catalog.

    Each non-comment line ``<code> <defn> <>`` produces
    ``targetDirectory/<catName><code>.js`` containing a single
    ``Proj4js.defs["<catName>:<code>"]`` assignment.
    """
    pjCatFilename = os.path.join(rezDirectory, catName)
    comment_re = re.compile("^#")
    srsdef_re = re.compile("^<([^>]*)> *(.*) <>$")
    with open(pjCatFilename, 'r') as pjCat:
        for line in pjCat:
            if comment_re.search(line) is None:
                srsdef_mo = srsdef_re.match(line)
                srsdef_fn = os.path.join(targetDirectory,
                                         catName + srsdef_mo.group(1) + ".js")
                srsdef = ('Proj4js.defs["' + catName + ':' + srsdef_mo.group(1)
                          + '"]="' + srsdef_mo.group(2) + '";')
                # open() replaces the Python-2-only file() builtin, and the
                # 'with' closes the handle deterministically.
                with open(srsdef_fn, 'w') as out:
                    out.write(srsdef)
def pjcat2js_clean(rezDirectory, targetDirectory):
    """Remove the generated .js definitions for every catalog found under
    rezDirectory (non-.js, non-hidden files)."""
    # Refuse to operate on missing directories or on the filesystem root.
    if not (os.path.isdir(rezDirectory) and os.path.isdir(targetDirectory)):
        return
    if os.path.abspath(rezDirectory) == '/' or os.path.abspath(targetDirectory) == '/':
        return
    prefix_len = len(rezDirectory)
    for root, dirs, filenames in os.walk(rezDirectory):
        # Prune version-control bookkeeping directories from the walk.
        for vcs_dir in ('CVS', '.svn'):
            if vcs_dir in dirs:
                dirs.remove(vcs_dir)
        for filename in filenames:
            if filename.endswith(SUFFIX_JAVASCRIPT) or filename.startswith("."):
                continue
            # Catalog name relative to rezDirectory, with '/' separators.
            rel_path = os.path.join(root, filename)[prefix_len + 1:]
            rel_path = rel_path.replace("\\", "/")
            _pjcat2js_remove(rezDirectory, rel_path, targetDirectory)
def pjcat2js_run(rezDirectory, targetDirectory):
    """Generate the .js definitions for every catalog found under
    rezDirectory (non-.js, non-hidden files)."""
    # Refuse to operate on missing directories or on the filesystem root.
    if not (os.path.isdir(rezDirectory) and os.path.isdir(targetDirectory)):
        return
    if os.path.abspath(rezDirectory) == '/' or os.path.abspath(targetDirectory) == '/':
        return
    prefix_len = len(rezDirectory)
    for root, dirs, filenames in os.walk(rezDirectory):
        # Prune version-control bookkeeping directories from the walk.
        for vcs_dir in ('CVS', '.svn'):
            if vcs_dir in dirs:
                dirs.remove(vcs_dir)
        for filename in filenames:
            if filename.endswith(SUFFIX_JAVASCRIPT) or filename.startswith("."):
                continue
            # Catalog name relative to rezDirectory, with '/' separators.
            rel_path = os.path.join(root, filename)[prefix_len + 1:]
            rel_path = rel_path.replace("\\", "/")
            _pjcat2js_make(rezDirectory, rel_path, targetDirectory)
|
openprocurement/openprocurement.auctions.flash | refs/heads/master | openprocurement/auctions/flash/tests/__init__.py | 12133432 | |
webgeodatavore/django | refs/heads/master | tests/sitemaps_tests/__init__.py | 12133432 | |
arju88nair/projectCulminate | refs/heads/master | venv/lib/python3.5/site-packages/pymongo/ssl_match_hostname.py | 21 | # Backport of the match_hostname logic from python 3.5, with small
# changes to support IP address matching on python 2.6, 2.7, 3.3, and 3.4.
import re
import sys
try:
    # Python 3.3+, or the ipaddress module from pypi.
    from ipaddress import ip_address
except ImportError:
    # No ipaddress module: map every value to None, which disables the
    # IP-address branch below and leaves only DNS-name matching.
    ip_address = lambda address: None
# ipaddress.ip_address requires unicode
if sys.version_info[0] < 3:
    _unicode = unicode
else:
    # On Python 3 every str is already unicode; identity is sufficient.
    _unicode = lambda value: value
class CertificateError(ValueError):
    # Raised when the peer certificate does not match the expected hostname.
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
    """Matching according to RFC 6125, section 6.4.3

    http://tools.ietf.org/html/rfc6125#section-6.4.3

    :param dn: dNSName value from the certificate (may contain a '*')
    :param hostname: the hostname being verified
    :param max_wildcards: refuse patterns containing more '*' than this
    :return: truthy when *dn* matches *hostname*, falsy otherwise
    """
    pats = []
    if not dn:
        return False
    parts = dn.split(r'.')
    leftmost = parts[0]
    remainder = parts[1:]
    wildcards = leftmost.count('*')
    if wildcards > max_wildcards:
        # Issue #17980: avoid denials of service by refusing more
        # than one wildcard per fragment. A survey of established
        # policy among SSL implementations showed it to be a
        # reasonable choice.
        raise CertificateError(
            "too many wildcards in certificate DNS name: " + repr(dn))
    # speed up common case w/o wildcards
    if not wildcards:
        return dn.lower() == hostname.lower()
    # RFC 6125, section 6.4.3, subitem 1.
    # The client SHOULD NOT attempt to match a presented identifier in which
    # the wildcard character comprises a label other than the left-most label.
    if leftmost == '*':
        # When '*' is a fragment by itself, it matches a non-empty dotless
        # fragment.
        pats.append('[^.]+')
    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
        # RFC 6125, section 6.4.3, subitem 3.
        # The client SHOULD NOT attempt to match a presented identifier
        # where the wildcard character is embedded within an A-label or
        # U-label of an internationalized domain name.
        pats.append(re.escape(leftmost))
    else:
        # Otherwise, '*' matches any dotless string, e.g. www*
        pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
    # add the remaining fragments, ignore any wildcards
    for frag in remainder:
        pats.append(re.escape(frag))
    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
    return pat.match(hostname)
def _ipaddress_match(ipname, host_ip):
    """Exact matching of IP addresses.

    RFC 6125 explicitly doesn't define an algorithm for this
    (section 1.7.2 - "Out of Scope").
    """
    # OpenSSL may add a trailing newline to a subjectAltName's IP address
    ip = ip_address(_unicode(ipname).rstrip())
    return ip == host_ip
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate, match_hostname needs a "
                         "SSL socket or SSL context with either "
                         "CERT_OPTIONAL or CERT_REQUIRED")
    try:
        host_ip = ip_address(_unicode(hostname))
    except (ValueError, UnicodeError):
        # Not an IP address (common case)
        host_ip = None
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if host_ip is None and _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
        elif key == 'IP Address':
            if host_ip is not None and _ipaddress_match(value, host_ip):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")
|
OSSESAC/odoopubarquiluz | refs/heads/7.0 | addons/crm_claim/__init__.py | 53 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_claim
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
phoebusliang/parallel-lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/tests/regressiontests/extra_regress/__init__.py | 12133432 | |
vladryk/horizon | refs/heads/master | openstack_dashboard/management/__init__.py | 12133432 | |
kenshay/ImageScript | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/django/template/loaders/__init__.py | 12133432 | |
hotpoor-for-Liwei/hj_hackathon_201607 | refs/heads/master | tornado/platform/__init__.py | 12133432 | |
yoer/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/app_loading/models.py | 12133432 | |
mohamedhagag/community-addons | refs/heads/8.0 | price_security/__init__.py | 119 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from . import models
|
menardorama/ReadyNAS-Add-ons | refs/heads/master | headphones-1.0.0/files/apps/headphones/lib/mutagen/_tags.py | 31 | # Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
class Metadata(object):
    """An abstract dict-like object.

    Metadata is the base class for many of the tag objects in Mutagen.
    """

    __module__ = "mutagen"

    def __init__(self, *args, **kwargs):
        # All parsing is delegated to load(); constructing with no
        # arguments creates an empty instance without touching any file.
        if not (args or kwargs):
            return
        self.load(*args, **kwargs)

    def load(self, *args, **kwargs):
        """Populate the instance from a file; subclasses must override."""
        raise NotImplementedError

    def save(self, filename=None):
        """Save changes to a file."""
        raise NotImplementedError

    def delete(self, filename=None):
        """Remove tags from a file."""
        raise NotImplementedError
|
apixandru/intellij-community | refs/heads/master | python/testData/copyPaste/Indent2.dst.py | 83 | def bar():
<caret>
var = "string" |
dkillick/iris | refs/heads/master | lib/iris/tests/integration/plot/test_colorbar.py | 6 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test interaction between :mod:`iris.plot` and
:func:`matplotlib.pyplot.colorbar`
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
from iris.coords import AuxCoord
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
from iris.plot import contour, contourf, pcolormesh, pcolor,\
points, scatter
@tests.skip_plot
class TestColorBarCreation(tests.GraphicsTest):
    """Check that plt.colorbar() picks up the mappable produced by each
    iris.plot draw function, and honours an explicitly passed mappable.

    NOTE(review): these tests rely on matplotlib's implicit pyplot state --
    plt.colorbar() with no argument binds to the *current* mappable -- so
    the order of draw calls inside each test is significant.
    """
    def setUp(self):
        # Build a small lat/lon cube (with bounds, as pcolor/pcolormesh
        # need them) plus a trajectory of auxiliary lat/lon coords for the
        # points/scatter tests.
        super(TestColorBarCreation, self).setUp()
        self.draw_functions = (contour, contourf, pcolormesh, pcolor)
        self.cube = iris.tests.stock.lat_lon_cube()
        self.cube.coord('longitude').guess_bounds()
        self.cube.coord('latitude').guess_bounds()
        self.traj_lon = AuxCoord(np.linspace(-180, 180, 50),
                                 standard_name='longitude',
                                 units='degrees')
        self.traj_lat = AuxCoord(np.sin(np.deg2rad(self.traj_lon.points))*30.0,
                                 standard_name='latitude',
                                 units='degrees')
    def test_common_draw_functions(self):
        # A bare plt.colorbar() must attach to the most recent draw.
        for draw_function in self.draw_functions:
            mappable = draw_function(self.cube)
            cbar = plt.colorbar()
            self.assertIs(
                cbar.mappable, mappable,
                msg='Problem with draw function iris.plot.{}'.format(
                    draw_function.__name__))
    def test_common_draw_functions_specified_mappable(self):
        # An explicitly passed mappable must win over the current one.
        for draw_function in self.draw_functions:
            mappable_initial = draw_function(self.cube, cmap='cool')
            mappable = draw_function(self.cube)
            cbar = plt.colorbar(mappable_initial)
            self.assertIs(
                cbar.mappable, mappable_initial,
                msg='Problem with draw function iris.plot.{}'.format(
                    draw_function.__name__))
    def test_points_with_c_kwarg(self):
        mappable = points(self.cube, c=self.cube.data)
        cbar = plt.colorbar()
        self.assertIs(cbar.mappable, mappable)
    def test_points_with_c_kwarg_specified_mappable(self):
        mappable_initial = points(self.cube, c=self.cube.data, cmap='cool')
        mappable = points(self.cube, c=self.cube.data)
        cbar = plt.colorbar(mappable_initial)
        self.assertIs(cbar.mappable, mappable_initial)
    def test_scatter_with_c_kwarg(self):
        mappable = scatter(self.traj_lon, self.traj_lat,
                           c=self.traj_lon.points)
        cbar = plt.colorbar()
        self.assertIs(cbar.mappable, mappable)
    def test_scatter_with_c_kwarg_specified_mappable(self):
        mappable_initial = scatter(self.traj_lon, self.traj_lat,
                                   c=self.traj_lon.points)
        mappable = scatter(self.traj_lon, self.traj_lat,
                           c=self.traj_lon.points,
                           cmap='cool')
        cbar = plt.colorbar(mappable_initial)
        self.assertIs(cbar.mappable, mappable_initial)
# Standard iris test-suite entry point.
if __name__ == "__main__":
    tests.main()
|
richardcs/ansible | refs/heads/devel | lib/ansible/modules/network/eos/eos_l2_interface.py | 30 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_l2_interface
version_added: "2.5"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage L2 interfaces on Arista EOS network devices.
description:
- This module provides declarative management of L2 interfaces
on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
name:
description:
- Name of the interface
required: true
aliases: ['interface']
mode:
description:
- Mode in which interface needs to be configured.
choices: ['access','trunk']
access_vlan:
description:
- Configure given VLAN in access port.
If C(mode=access), used as the access VLAN ID.
native_vlan:
description:
- Native VLAN to be configured in trunk port.
If C(mode=trunk), used as the trunk native VLAN ID.
trunk_allowed_vlans:
description:
- List of allowed VLANs in a given trunk port.
If C(mode=trunk), these are the ONLY VLANs that will be
configured on the trunk, i.e. C(2-10,15).
aliases: ['trunk_vlans']
aggregate:
description:
- List of Layer-2 interface definitions.
state:
description:
- Manage the state of the Layer-2 Interface configuration.
default: present
choices: ['present','absent', 'unconfigured']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: Ensure Ethernet1 does not have any switchport
eos_l2_interface:
name: Ethernet1
state: absent
- name: Ensure Ethernet1 is configured for access vlan 20
eos_l2_interface:
name: Ethernet1
mode: access
access_vlan: 20
- name: Ensure Ethernet1 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
eos_l2_interface:
name: Ethernet1
mode: trunk
native_vlan: 10
trunk_allowed_vlans: 2-50
- name: Set switchports on aggregate
eos_l2_interface:
aggregate:
- { name: ethernet1, mode: access, access_vlan: 20}
- { name: ethernet2, mode: trunk, native_vlan: 10}
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always.
type: list
sample:
- interface ethernet1
- switchport access vlan 20
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.eos.eos import get_config, load_config, run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec
def parse_config_argument(configobj, name, arg=None):
    """Return the value following *arg* within the named interface's
    configuration block, or None when the argument is not present."""
    section = configobj['interface %s' % name]
    body = '\n'.join(section.children)
    match = re.search(r'%s (.+)$' % arg, body, re.M)
    if not match:
        return None
    return match.group(1).strip()
def search_obj_in_list(name, lst):
    """Return the first dict in *lst* whose 'name' equals *name*, else None."""
    return next((item for item in lst if item['name'] == name), None)
def map_obj_to_commands(updates, module):
    """Build the EOS configuration commands that move each interface from
    its current state (*have*) to the desired state (*want*).

    updates: tuple (want, have) of lists of interface dicts as produced by
             map_params_to_obj()/map_config_to_obj().
    module:  AnsibleModule; used only to fail on interfaces absent from the
             device.
    Returns the flat list of config-mode commands (interface lines with no
    changes are dropped).
    """
    commands = list()
    want, have = updates

    for w in want:
        name = w['name']
        state = w['state']
        mode = w['mode']
        access_vlan = w['access_vlan']
        native_vlan = w['native_vlan']
        trunk_allowed_vlans = w['trunk_allowed_vlans']

        interface = 'interface ' + name
        commands.append(interface)

        obj_in_have = search_obj_in_list(name, have)

        if not obj_in_have:
            module.fail_json(msg='invalid interface {0}'.format(name))

        if state == 'absent':
            # Strip any existing layer-2 configuration, then disable the
            # switchport if it is currently enabled.
            if obj_in_have['mode'] == 'access':
                commands.append('no switchport access vlan {0}'.format(obj_in_have['access_vlan']))
            if obj_in_have['mode'] == 'trunk':
                commands.append('no switchport mode trunk')
            if obj_in_have['native_vlan']:
                commands.append('no switchport trunk native vlan {0}'.format(obj_in_have['native_vlan']))
            if obj_in_have['trunk_allowed_vlans']:
                commands.append('no switchport trunk allowed vlan {0}'.format(obj_in_have['trunk_allowed_vlans']))
            if obj_in_have['state'] == 'present':
                commands.append('no switchport')
        else:
            if obj_in_have['state'] == 'absent':
                # Routed interface: enable switching and apply the requested
                # mode and VLANs from scratch.
                commands.append('switchport')
                commands.append('switchport mode {0}'.format(mode))
                if access_vlan:
                    commands.append('switchport access vlan {0}'.format(access_vlan))
                if native_vlan:
                    commands.append('switchport trunk native vlan {0}'.format(native_vlan))
                if trunk_allowed_vlans:
                    commands.append('switchport trunk allowed vlan {0}'.format(trunk_allowed_vlans))
            else:
                if mode != obj_in_have['mode']:
                    if obj_in_have['mode'] == 'access':
                        # access -> trunk
                        commands.append('no switchport access vlan {0}'.format(obj_in_have['access_vlan']))
                        if native_vlan:
                            commands.append('switchport trunk native vlan {0}'.format(native_vlan))
                        if trunk_allowed_vlans:
                            commands.append('switchport trunk allowed vlan {0}'.format(trunk_allowed_vlans))
                    else:
                        # trunk -> access. BUGFIX: these two commands were
                        # emitted as 'not switchport ...', which is not a
                        # valid EOS command -- the negation keyword is 'no'.
                        if obj_in_have['native_vlan']:
                            commands.append('no switchport trunk native vlan {0}'.format(obj_in_have['native_vlan']))
                        if obj_in_have['trunk_allowed_vlans']:
                            commands.append('no switchport trunk allowed vlan {0}'.format(obj_in_have['trunk_allowed_vlans']))
                        commands.append('switchport access vlan {0}'.format(access_vlan))
                else:
                    # Mode unchanged: only update VLANs that differ.
                    if mode == 'access':
                        if access_vlan != obj_in_have['access_vlan']:
                            commands.append('switchport access vlan {0}'.format(access_vlan))
                    else:
                        if native_vlan != obj_in_have['native_vlan'] and native_vlan:
                            commands.append('switchport trunk native vlan {0}'.format(native_vlan))
                        if trunk_allowed_vlans != obj_in_have['trunk_allowed_vlans'] and trunk_allowed_vlans:
                            commands.append('switchport trunk allowed vlan {0}'.format(trunk_allowed_vlans))

        # Drop the bare 'interface X' line when nothing changed for it.
        if commands[-1] == interface:
            commands.pop(-1)

    return commands
def map_config_to_obj(module):
    """Read the running config and return one dict per interface describing
    its current layer-2 state (name, state, mode and VLAN settings)."""
    config = get_config(module, flags=['| section interface'])
    configobj = NetworkConfig(indent=3, contents=config)

    names = re.findall(r'^interface (\S+)', config, re.M)
    if not names:
        return []

    instances = []
    for item in set(names):
        # 'Switchport: Enabled/Disabled' tells us whether the interface is
        # currently operating in layer-2 mode.
        command = {'command': 'show interfaces {0} switchport | include Switchport'.format(item),
                   'output': 'text'}
        switchport_cfg = run_commands(module, command)[0].split(':')[1].strip()

        state = 'present' if switchport_cfg == 'Enabled' else 'absent'

        obj = {'name': item.lower(), 'state': state}
        obj['access_vlan'] = parse_config_argument(configobj, item, 'switchport access vlan')
        obj['native_vlan'] = parse_config_argument(configobj, item, 'switchport trunk native vlan')
        obj['trunk_allowed_vlans'] = parse_config_argument(configobj, item, 'switchport trunk allowed vlan')
        # An access VLAN implies access mode; otherwise assume trunk.
        obj['mode'] = 'access' if obj['access_vlan'] else 'trunk'

        instances.append(obj)

    return instances
def map_params_to_obj(module):
    """Normalize module parameters into a list of desired-state dicts.

    With 'aggregate', any per-item key left as None falls back to the
    matching top-level module parameter; interface names are lowercased in
    both paths.
    """
    params = module.params
    aggregate = params.get('aggregate')

    if not aggregate:
        return [{
            'name': params['name'].lower(),
            'mode': params['mode'],
            'access_vlan': params['access_vlan'],
            'native_vlan': params['native_vlan'],
            'trunk_allowed_vlans': params['trunk_allowed_vlans'],
            'state': params['state'],
        }]

    obj = []
    for item in aggregate:
        for key in item:
            if item.get(key) is None:
                item[key] = params[key]
        item['name'] = item['name'].lower()
        obj.append(item.copy())
    return obj
def main():
    """ main entry point for module execution
    """
    # Spec for a single interface entry; reused (with defaults stripped)
    # for items of the 'aggregate' list.
    element_spec = dict(
        name=dict(type='str', aliases=['interface']),
        mode=dict(choices=['access', 'trunk']),
        access_vlan=dict(type='str'),
        native_vlan=dict(type='str'),
        trunk_allowed_vlans=dict(type='str', aliases=['trunk_vlans']),
        state=dict(default='present',
                   choices=['present', 'absent'])
    )
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
    )
    argument_spec.update(element_spec)
    argument_spec.update(eos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['access_vlan', 'native_vlan'],
                                               ['access_vlan', 'trunk_allowed_vlans']],
                           supports_check_mode=True)
    warnings = list()
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # Desired state (from parameters) vs current state (from the device).
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands
    if commands:
        # In check mode, load into a config session without committing.
        commit = not module.check_mode
        response = load_config(module, commands, commit=commit)
        if response.get('diff') and module._diff:
            result['diff'] = {'prepared': response.get('diff')}
        result['session_name'] = response.get('session')
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()
|
vipul-sharma20/oh-mainline | refs/heads/master | vendor/packages/python-social-auth/social/tests/models.py | 34 | import base64
from social.storage.base import UserMixin, NonceMixin, AssociationMixin, \
CodeMixin, BaseStorage
class BaseModel(object):
    """Minimal in-memory model base: sequential ids plus a class-level cache.

    Subclasses provide the NEXT_ID counter and the cache dict.
    """

    @classmethod
    def next_id(cls):
        """Return the next sequential id and advance the class counter."""
        current = cls.NEXT_ID
        cls.NEXT_ID = current + 1
        return current

    @classmethod
    def get(cls, key):
        """Look *key* up in the class cache (None when missing)."""
        return cls.cache.get(key)

    @classmethod
    def reset_cache(cls):
        """Drop every cached instance."""
        cls.cache = {}
class User(BaseModel):
    """Minimal in-memory user record used by the storage tests."""
    NEXT_ID = 1
    cache = {}
    # Class-wide flag so tests can simulate inactive users.
    _is_active = True
    def __init__(self, username, email=None):
        self.id = User.next_id()
        self.username = username
        self.email = email
        self.password = None
        self.slug = None
        # Associated social-auth entries; appended by TestUserSocialAuth.
        self.social = []
        self.extra_data = {}
        self.save()
    def is_active(self):
        return self._is_active
    @classmethod
    def set_active(cls, is_active=True):
        # Stored on the class, so it affects every instance at once.
        cls._is_active = is_active
    def set_password(self, password):
        self.password = password
    def save(self):
        # 'Persist' by indexing the instance in the class cache by username.
        User.cache[self.username] = self
class TestUserSocialAuth(UserMixin, BaseModel):
    """In-memory UserSocialAuth test double backing TestStorage."""
    NEXT_ID = 1
    cache = {}
    # Secondary index: uid -> instance (assumes uids are unique across
    # providers in the tests -- TODO confirm).
    cache_by_uid = {}
    def __init__(self, user, provider, uid, extra_data=None):
        self.id = TestUserSocialAuth.next_id()
        self.user = user
        self.provider = provider
        self.uid = uid
        self.extra_data = extra_data or {}
        self.user.social.append(self)
        TestUserSocialAuth.cache_by_uid[uid] = self
    def save(self):
        # Instances register themselves in __init__; nothing to persist.
        pass
    @classmethod
    def reset_cache(cls):
        cls.cache = {}
        cls.cache_by_uid = {}
    @classmethod
    def changed(cls, user):
        pass
    @classmethod
    def get_username(cls, user):
        return user.username
    @classmethod
    def user_model(cls):
        return User
    @classmethod
    def username_max_length(cls):
        return 1024
    @classmethod
    def allowed_to_disconnect(cls, user, backend_name, association_id=None):
        # Allowed when the user keeps another way to log in (a password or
        # at least one other social association).
        return user.password or len(user.social) > 1
    @classmethod
    def disconnect(cls, entry):
        cls.cache.pop(entry.id, None)
        entry.user.social = [s for s in entry.user.social if entry != s]
    @classmethod
    def user_exists(cls, username):
        return User.cache.get(username) is not None
    @classmethod
    def create_user(cls, username, email=None):
        return User(username=username, email=email)
    @classmethod
    def get_user(cls, pk):
        # Linear scan over the username-keyed cache; fine for test sizes.
        for username, user in User.cache.items():
            if user.id == pk:
                return user
    @classmethod
    def get_social_auth(cls, provider, uid):
        social_user = cls.cache_by_uid.get(uid)
        if social_user and social_user.provider == provider:
            return social_user
    @classmethod
    def get_social_auth_for_user(cls, user, provider=None, id=None):
        # NOTE(review): the provider/id filters are ignored by this test
        # double; every association of the user is returned.
        return user.social
    @classmethod
    def create_social_auth(cls, user, uid, provider):
        return cls(user=user, provider=provider, uid=uid)
    @classmethod
    def get_users_by_email(cls, email):
        return [user for user in User.cache.values() if user.email == email]
class TestNonce(NonceMixin, BaseModel):
    """In-memory OpenID nonce record."""
    NEXT_ID = 1
    cache = {}
    def __init__(self, server_url, timestamp, salt):
        self.id = TestNonce.next_id()
        self.server_url = server_url
        self.timestamp = timestamp
        self.salt = salt
    @classmethod
    def use(cls, server_url, timestamp, salt):
        # Record the nonce keyed by server URL and return it.
        nonce = TestNonce(server_url, timestamp, salt)
        TestNonce.cache[server_url] = nonce
        return nonce
class TestAssociation(AssociationMixin, BaseModel):
    """In-memory OpenID association store keyed by (server_url, handle)."""
    NEXT_ID = 1
    cache = {}

    def __init__(self, server_url, handle):
        self.id = TestAssociation.next_id()
        self.server_url = server_url
        self.handle = handle

    def save(self):
        TestAssociation.cache[(self.server_url, self.handle)] = self

    @classmethod
    def store(cls, server_url, association):
        """Create or update the stored copy of *association* for *server_url*."""
        assoc = TestAssociation.cache.get((server_url, association.handle))
        if assoc is None:
            assoc = TestAssociation(server_url=server_url,
                                    handle=association.handle)
        # base64.encodestring was deprecated in Python 3.1 and removed in
        # 3.9; use its replacement encodebytes when available and fall back
        # to encodestring on Python 2.
        encode = getattr(base64, 'encodebytes', None)
        if encode is None:  # Python 2
            encode = base64.encodestring
        assoc.secret = encode(association.secret)
        assoc.issued = association.issued
        assoc.lifetime = association.lifetime
        assoc.assoc_type = association.assoc_type
        assoc.save()

    @classmethod
    def get(cls, server_url=None, handle=None):
        """Return all stored associations matching the given filters."""
        result = []
        for assoc in TestAssociation.cache.values():
            if server_url and assoc.server_url != server_url:
                continue
            if handle and assoc.handle != handle:
                continue
            result.append(assoc)
        return result

    @classmethod
    def remove(cls, ids_to_delete):
        """Drop every association whose id is in *ids_to_delete*."""
        assoc = filter(lambda a: a.id in ids_to_delete,
                       TestAssociation.cache.values())
        # list() materializes the filter so the cache can be mutated safely.
        for a in list(assoc):
            TestAssociation.cache.pop((a.server_url, a.handle), None)
class TestCode(CodeMixin, BaseModel):
    """In-memory verification-code store."""
    NEXT_ID = 1
    cache = {}
    @classmethod
    def get_code(cls, code):
        # Linear scan; fine for test-sized caches. Returns None if missing.
        for c in cls.cache.values():
            if c.code == code:
                return c
class TestStorage(BaseStorage):
    """Wire the in-memory test doubles into the BaseStorage interface."""
    user = TestUserSocialAuth
    nonce = TestNonce
    association = TestAssociation
    code = TestCode
|
juselius/hindarot | refs/heads/master | config/mkdep90.py | 1 | #!/usr/bin/env python
#
# $Id$
#
import sys, string, re
# Fortran 'use <module>' statements (case-insensitive)
use=re.compile('\s*use\s+(\w+)', re.I)
# Fortran 'module <name>' definitions, excluding 'module procedure'
mod=re.compile('\s*module(?!\s+procedure)\s+(\w+)', re.I)
class depfile:
    """Dependency record for one Fortran source file."""

    def __init__(self, name):
        self.name = name
        self.oname = name[:-3] + 'o'  # e.g. foo.f90 -> foo.o
        self.uses = {}                # module names used (dict acts as a set)

    def adduses(self, u):
        """Record that this file uses module *u* (duplicates collapse)."""
        self.uses[u] = ''
def main():
allmods={}
dfiles=[]
for ff in sys.argv[1:]:
fd=open(ff, 'r')
buf=fd.readlines()
fd.close()
dfiles.append(depfile(ff))
for ln in buf:
m=mod.match(ln)
if m is not None:
allmods[m.group(1)]=ff
else:
m=use.match(ln)
if m is not None:
dfiles[-1].adduses(m.group(1))
for df in dfiles:
deps=[]
for dd in df.uses.keys():
try:
if (allmods[dd] != df.name):
omod=allmods[dd][:-3]+'o'
deps.append(omod)
except:
print >> sys.stderr, 'Missing dependency for', dd, 'in',\
df.name
if deps:
dstr=df.oname+': '
for i in deps:
dstr=dstr+i+' '
print dstr
if __name__ == '__main__':
main()
|
StrellaGroup/frappe | refs/heads/develop | frappe/contacts/doctype/address_template/address_template.py | 17 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.jinja import validate_template
from frappe import _
class AddressTemplate(Document):
	"""Jinja template used to render addresses; exactly one must be the default."""

	def validate(self):
		"""Fill in the stock template when empty, force a default template to
		exist, and check that the template compiles."""
		if not self.template:
			self.template = get_default_address_template()

		# Other templates currently flagged as default (used by on_update).
		self.defaults = frappe.db.get_values("Address Template",
			{"is_default": 1, "name": ("!=", self.name)})

		if not (self.is_default or self.defaults):
			self.is_default = 1
			frappe.msgprint(_("Setting this Address Template as default as there is no other default"))

		validate_template(self.template)

	def on_update(self):
		# Becoming the default demotes every other default template.
		if self.is_default and self.defaults:
			for row in self.defaults:
				frappe.db.set_value("Address Template", row[0], "is_default", 0)

	def on_trash(self):
		if self.is_default:
			frappe.throw(_("Default Address Template cannot be deleted"))
@frappe.whitelist()
def get_default_address_template():
	'''Get default address template (translated)'''
	# One-line Jinja template: address lines, city, optional state/pincode,
	# country, then optional phone/fax/email rows. The Phone/Fax/Email
	# labels are translated once via _() when this module is executed.
	return '''{{ address_line1 }}<br>{% if address_line2 %}{{ address_line2 }}<br>{% endif -%}\
{{ city }}<br>
{% if state %}{{ state }}<br>{% endif -%}
{% if pincode %}{{ pincode }}<br>{% endif -%}
{{ country }}<br>
{% if phone %}'''+_('Phone')+''': {{ phone }}<br>{% endif -%}
{% if fax %}'''+_('Fax')+''': {{ fax }}<br>{% endif -%}
{% if email_id %}'''+_('Email')+''': {{ email_id }}<br>{% endif -%}'''
|
endlessm/chromium-browser | refs/heads/master | third_party/angle/third_party/glmark2/src/waflib/Task.py | 4 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,sys,tempfile
from waflib import Utils,Logs,Errors
NOT_RUN=0
MISSING=1
CRASHED=2
EXCEPTION=3
SKIPPED=8
SUCCESS=9
ASK_LATER=-1
SKIP_ME=-2
RUN_ME=-3
COMPILE_TEMPLATE_SHELL='''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
cwdx = tsk.get_cwd()
p = env.get_flat
tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s
return tsk.exec_command(cmd, cwd=cwdx, env=env.env or None)
'''
COMPILE_TEMPLATE_NOSHELL='''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
cwdx = tsk.get_cwd()
def to_list(xx):
if isinstance(xx, str): return [xx]
return xx
def merge(lst1, lst2):
if lst1 and lst2:
return lst1[:-1] + [lst1[-1] + lst2[0]] + lst2[1:]
return lst1 + lst2
lst = []
%s
if '' in lst:
lst = [x for x in lst if x]
tsk.last_cmd = lst
return tsk.exec_command(lst, cwd=cwdx, env=env.env or None)
'''
classes={}
class store_task_type(type):
	"""Metaclass that registers every task class in the module-level
	'classes' map and, when the class declares a run_str command string,
	compiles it into a run() method."""
	def __init__(cls,name,bases,dict):
		super(store_task_type,cls).__init__(name,bases,dict)
		name=cls.__name__
		if name!='evil'and name!='TaskBase':
			global classes
			if getattr(cls,'run_str',None):
				# Compile the command template once per class, cache a hash
				# of it for change detection, and record the env variables
				# the command depends on.
				(f,dvars)=compile_fun(cls.run_str,cls.shell)
				cls.hcode=Utils.h_cmd(cls.run_str)
				cls.orig_run_str=cls.run_str
				cls.run_str=None
				cls.run=f
				cls.vars=list(set(cls.vars+dvars))
				cls.vars.sort()
			elif getattr(cls,'run',None)and not'hcode'in cls.__dict__:
				cls.hcode=Utils.h_cmd(cls.run)
			getattr(cls,'register',classes)[name]=cls
# Bootstrap class: gives TaskBase the metaclass without being registered
evil=store_task_type('evil',(object,),{})
class TaskBase(evil):
	"""Base class for build tasks: execution, status reporting and ordering
	constraints. Subclasses normally override run(); Task (below) adds
	signatures and dependency tracking."""
	color='GREEN'
	ext_in=[]
	ext_out=[]
	before=[]
	after=[]
	hcode=''
	keep_last_cmd=False
	__slots__=('hasrun','generator')
	def __init__(self,*k,**kw):
		self.hasrun=NOT_RUN
		try:
			self.generator=kw['generator']
		except KeyError:
			self.generator=self
	def __repr__(self):
		return'\n\t{task %r: %s %s}'%(self.__class__.__name__,id(self),str(getattr(self,'fun','')))
	def __str__(self):
		if hasattr(self,'fun'):
			return self.fun.__name__
		return self.__class__.__name__
	def keyword(self):
		"""Word shown before the task in progress output."""
		if hasattr(self,'fun'):
			return'Function'
		return'Processing'
	def get_cwd(self):
		"""Return the working directory Node for command execution."""
		bld=self.generator.bld
		ret=getattr(self,'cwd',None)or getattr(self.generator.bld,'cwd',bld.bldnode)
		if isinstance(ret,str):
			self.generator.bld.fatal('Working folders given to tasks must be Node objects')
		return ret
	def quote_flag(self,x):
		"""Quote a command-line flag for inclusion in a response file."""
		old=x
		if'\\'in x:
			x=x.replace('\\','\\\\')
		if'"'in x:
			x=x.replace('"','\\"')
		if old!=x or' 'in x or'\t'in x or"'"in x:
			x='"%s"'%x
		return x
	def split_argfile(self,cmd):
		"""Split a long command into (program, quoted-arguments) for an @argfile."""
		return([cmd[0]],[self.quote_flag(x)for x in cmd[1:]])
	def exec_command(self,cmd,**kw):
		"""Run *cmd* through the build context, propagating env.PATH and
		falling back to a temporary @argfile when the command line is too
		long for the platform."""
		if not'cwd'in kw:
			kw['cwd']=self.get_cwd()
		if self.env.PATH:
			env=kw['env']=dict(kw.get('env')or self.env.env or os.environ)
			env['PATH']=self.env.PATH if isinstance(self.env.PATH,str)else os.pathsep.join(self.env.PATH)
		if not isinstance(cmd,str)and(len(repr(cmd))>=8192 if Utils.is_win32 else len(cmd)>200000):
			cmd,args=self.split_argfile(cmd)
			try:
				(fd,tmp)=tempfile.mkstemp()
				os.write(fd,'\r\n'.join(args).encode())
				os.close(fd)
				return self.generator.bld.exec_command(cmd+['@'+tmp],**kw)
			finally:
				try:
					os.remove(tmp)
				except OSError:
					pass
		else:
			return self.generator.bld.exec_command(cmd,**kw)
	def runnable_status(self):
		# Base tasks always run; Task refines this with signatures.
		return RUN_ME
	def uid(self):
		return Utils.SIG_NIL
	def process(self):
		"""Execute the task and update hasrun; errors are routed to the
		producer's error handler."""
		m=self.generator.bld.producer
		try:
			del self.generator.bld.task_sigs[self.uid()]
		except KeyError:
			pass
		try:
			ret=self.run()
		except Exception:
			self.err_msg=Utils.ex_stack()
			self.hasrun=EXCEPTION
			m.error_handler(self)
			return
		if ret:
			self.err_code=ret
			self.hasrun=CRASHED
		else:
			try:
				self.post_run()
			except Errors.WafError:
				pass
			except Exception:
				self.err_msg=Utils.ex_stack()
				self.hasrun=EXCEPTION
			else:
				self.hasrun=SUCCESS
		if self.hasrun!=SUCCESS:
			m.error_handler(self)
	def run(self):
		# Default behavior: call the attached function, if any.
		if hasattr(self,'fun'):
			return self.fun(self)
		return 0
	def post_run(self):
		pass
	def log_display(self,bld):
		"""Write the progress line for this task to the logger."""
		if self.generator.bld.progress_bar==3:
			return
		s=self.display()
		if s:
			if bld.logger:
				logger=bld.logger
			else:
				logger=Logs
			if self.generator.bld.progress_bar==1:
				c1=Logs.colors.cursor_off
				c2=Logs.colors.cursor_on
				logger.info(s,extra={'stream':sys.stderr,'terminator':'','c1':c1,'c2':c2})
			else:
				logger.info(s,extra={'terminator':'','c1':'','c2':''})
	def display(self):
		"""Format the progress text depending on the progress_bar mode."""
		col1=Logs.colors(self.color)
		col2=Logs.colors.NORMAL
		master=self.generator.bld.producer
		def cur():
			tmp=-1
			if hasattr(master,'ready'):
				tmp-=master.ready.qsize()
			return master.processed+tmp
		if self.generator.bld.progress_bar==1:
			return self.generator.bld.progress_line(cur(),master.total,col1,col2)
		if self.generator.bld.progress_bar==2:
			ela=str(self.generator.bld.timer)
			try:
				ins=','.join([n.name for n in self.inputs])
			except AttributeError:
				ins=''
			try:
				outs=','.join([n.name for n in self.outputs])
			except AttributeError:
				outs=''
			return'|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n'%(master.total,cur(),ins,outs,ela)
		s=str(self)
		if not s:
			return None
		total=master.total
		n=len(str(total))
		fs='[%%%dd/%%%dd] %%s%%s%%s%%s\n'%(n,n)
		kw=self.keyword()
		if kw:
			kw+=' '
		return fs%(cur(),total,kw,col1,s,col2)
	def hash_constraints(self):
		"""Hash of the ordering constraints; tasks with equal hashes are
		grouped in set_precedence_constraints()."""
		cls=self.__class__
		tup=(str(cls.before),str(cls.after),str(cls.ext_in),str(cls.ext_out),cls.__name__,cls.hcode)
		return hash(tup)
	def format_error(self):
		"""Return a human-readable error message for a failed task."""
		msg=getattr(self,'last_cmd','')
		name=getattr(self.generator,'name','')
		if getattr(self,"err_msg",None):
			return self.err_msg
		elif not self.hasrun:
			return'task in %r was not executed for some reason: %r'%(name,self)
		elif self.hasrun==CRASHED:
			try:
				return' -> task in %r failed (exit status %r): %r\n%r'%(name,self.err_code,self,msg)
			except AttributeError:
				return' -> task in %r failed: %r\n%r'%(name,self,msg)
		elif self.hasrun==MISSING:
			return' -> missing files in %r: %r\n%r'%(name,self,msg)
		else:
			return'invalid status for task in %r: %r'%(name,self.hasrun)
	def colon(self,var1,var2):
		"""Expand ${VAR1:VAR2}-style substitutions: apply env[var1] as a
		%-template to each item, or interleave it as prefix flags."""
		tmp=self.env[var1]
		if not tmp:
			return[]
		if isinstance(var2,str):
			it=self.env[var2]
		else:
			it=var2
		if isinstance(tmp,str):
			return[tmp%x for x in it]
		else:
			lst=[]
			for y in it:
				lst.extend(tmp)
				lst.append(y)
			return lst
class Task(TaskBase):
	"""Task with input/output file nodes and signature-based up-to-date
	checks (explicit deps, env vars and optional scanner-found deps)."""
	vars=[]
	always_run=False
	shell=False
	def __init__(self,*k,**kw):
		TaskBase.__init__(self,*k,**kw)
		self.env=kw['env']
		self.inputs=[]
		self.outputs=[]
		self.dep_nodes=[]
		self.run_after=set([])
	def __str__(self):
		name=self.__class__.__name__
		if self.outputs:
			if name.endswith(('lib','program'))or not self.inputs:
				node=self.outputs[0]
				return node.path_from(node.ctx.launch_node())
		if not(self.inputs or self.outputs):
			return self.__class__.__name__
		if len(self.inputs)==1:
			node=self.inputs[0]
			return node.path_from(node.ctx.launch_node())
		src_str=' '.join([a.path_from(a.ctx.launch_node())for a in self.inputs])
		tgt_str=' '.join([a.path_from(a.ctx.launch_node())for a in self.outputs])
		if self.outputs:sep=' -> '
		else:sep=''
		return'%s: %s%s%s'%(self.__class__.__name__,src_str,sep,tgt_str)
	def keyword(self):
		# Pick a display verb from the class name and the input/output counts.
		name=self.__class__.__name__
		if name.endswith(('lib','program')):
			return'Linking'
		if len(self.inputs)==1 and len(self.outputs)==1:
			return'Compiling'
		if not self.inputs:
			if self.outputs:
				return'Creating'
			else:
				return'Running'
		return'Processing'
	def __repr__(self):
		try:
			ins=",".join([x.name for x in self.inputs])
			outs=",".join([x.name for x in self.outputs])
		except AttributeError:
			ins=",".join([str(x)for x in self.inputs])
			outs=",".join([str(x)for x in self.outputs])
		return"".join(['\n\t{task %r: '%id(self),self.__class__.__name__," ",ins," -> ",outs,'}'])
	def uid(self):
		"""Identity hash of the task: class name plus input/output paths
		(computed once and cached on the instance)."""
		try:
			return self.uid_
		except AttributeError:
			m=Utils.md5(self.__class__.__name__)
			up=m.update
			for x in self.inputs+self.outputs:
				up(x.abspath())
			self.uid_=m.digest()
			return self.uid_
	def set_inputs(self,inp):
		if isinstance(inp,list):self.inputs+=inp
		else:self.inputs.append(inp)
	def set_outputs(self,out):
		if isinstance(out,list):self.outputs+=out
		else:self.outputs.append(out)
	def set_run_after(self,task):
		assert isinstance(task,TaskBase)
		self.run_after.add(task)
	def signature(self):
		"""Hash of everything the task depends on: explicit deps, env vars
		and (when a scanner is set) implicit deps. Cached per instance."""
		try:
			return self.cache_sig
		except AttributeError:
			pass
		self.m=Utils.md5(self.hcode)
		self.sig_explicit_deps()
		self.sig_vars()
		if self.scan:
			try:
				self.sig_implicit_deps()
			except Errors.TaskRescan:
				# The scanner results were stale; recompute from scratch.
				return self.signature()
		ret=self.cache_sig=self.m.digest()
		return ret
	def runnable_status(self):
		"""Decide whether the task must run (RUN_ME), can be skipped
		(SKIP_ME) or must wait for other tasks (ASK_LATER)."""
		for t in self.run_after:
			if not t.hasrun:
				return ASK_LATER
		try:
			new_sig=self.signature()
		except Errors.TaskNotReady:
			return ASK_LATER
		bld=self.generator.bld
		key=self.uid()
		try:
			prev_sig=bld.task_sigs[key]
		except KeyError:
			Logs.debug('task: task %r must run: it was never run before or the task code changed',self)
			return RUN_ME
		if new_sig!=prev_sig:
			Logs.debug('task: task %r must run: the task signature changed',self)
			return RUN_ME
		for node in self.outputs:
			sig=bld.node_sigs.get(node)
			if not sig:
				Logs.debug('task: task %r must run: an output node has no signature',self)
				return RUN_ME
			if sig!=key:
				Logs.debug('task: task %r must run: an output node was produced by another task',self)
				return RUN_ME
			if not node.exists():
				Logs.debug('task: task %r must run: an output node does not exist',self)
				return RUN_ME
		return(self.always_run and RUN_ME)or SKIP_ME
	def post_run(self):
		"""Verify that all declared outputs exist and store the signatures
		so the next build can skip the task."""
		bld=self.generator.bld
		for node in self.outputs:
			if not node.exists():
				self.hasrun=MISSING
				self.err_msg='-> missing file: %r'%node.abspath()
				raise Errors.WafError(self.err_msg)
			bld.node_sigs[node]=self.uid()
		bld.task_sigs[self.uid()]=self.signature()
		if not self.keep_last_cmd:
			try:
				del self.last_cmd
			except AttributeError:
				pass
	def sig_explicit_deps(self):
		"""Hash the input and dep nodes, plus any manual dependencies
		registered in bld.deps_man (nodes or callables)."""
		bld=self.generator.bld
		upd=self.m.update
		for x in self.inputs+self.dep_nodes:
			upd(x.get_bld_sig())
		if bld.deps_man:
			additional_deps=bld.deps_man
			for x in self.inputs+self.outputs:
				try:
					d=additional_deps[x]
				except KeyError:
					continue
				for v in d:
					if isinstance(v,bld.root.__class__):
						v=v.get_bld_sig()
					elif hasattr(v,'__call__'):
						v=v()
					upd(v)
	def sig_vars(self):
		# Hash the environment variables declared in the class 'vars' list.
		sig=self.generator.bld.hash_env_vars(self.env,self.__class__.vars)
		self.m.update(sig)
	scan=None
	def sig_implicit_deps(self):
		"""Hash scanner-found dependencies, re-running the scanner (and
		raising TaskRescan) when the cached results no longer match."""
		bld=self.generator.bld
		key=self.uid()
		prev=bld.imp_sigs.get(key,[])
		if prev:
			try:
				if prev==self.compute_sig_implicit_deps():
					return prev
			except Errors.TaskNotReady:
				raise
			except EnvironmentError:
				# A cached dependency vanished: forget stale nodes so the
				# scanner can rediscover them.
				for x in bld.node_deps.get(self.uid(),[]):
					if not x.is_bld()and not x.exists():
						try:
							del x.parent.children[x.name]
						except KeyError:
							pass
			del bld.imp_sigs[key]
			raise Errors.TaskRescan('rescan')
		(bld.node_deps[key],bld.raw_deps[key])=self.scan()
		if Logs.verbose:
			Logs.debug('deps: scanner for %s: %r; unresolved: %r',self,bld.node_deps[key],bld.raw_deps[key])
		try:
			bld.imp_sigs[key]=self.compute_sig_implicit_deps()
		except EnvironmentError:
			for k in bld.node_deps.get(self.uid(),[]):
				if not k.exists():
					Logs.warn('Dependency %r for %r is missing: check the task declaration and the build order!',k,self)
			raise
	def compute_sig_implicit_deps(self):
		upd=self.m.update
		self.are_implicit_nodes_ready()
		for k in self.generator.bld.node_deps.get(self.uid(),[]):
			upd(k.get_bld_sig())
		return self.m.digest()
	def are_implicit_nodes_ready(self):
		"""Add run_after links for implicit deps produced by tasks of the
		current group; raise TaskNotReady if any producer has not run yet."""
		bld=self.generator.bld
		try:
			cache=bld.dct_implicit_nodes
		except AttributeError:
			bld.dct_implicit_nodes=cache={}
		try:
			dct=cache[bld.cur]
		except KeyError:
			dct=cache[bld.cur]={}
			for tsk in bld.cur_tasks:
				for x in tsk.outputs:
					dct[x]=tsk
		modified=False
		for x in bld.node_deps.get(self.uid(),[]):
			if x in dct:
				self.run_after.add(dct[x])
				modified=True
		if modified:
			for tsk in self.run_after:
				if not tsk.hasrun:
					raise Errors.TaskNotReady('not ready')
# On Python 3, md5 needs bytes: replace Task.uid with a variant that
# encodes the class name and node paths before hashing.
if sys.hexversion>0x3000000:
	def uid(self):
		try:
			return self.uid_
		except AttributeError:
			m=Utils.md5(self.__class__.__name__.encode('iso8859-1','xmlcharrefreplace'))
			up=m.update
			for x in self.inputs+self.outputs:
				up(x.abspath().encode('iso8859-1','xmlcharrefreplace'))
			self.uid_=m.digest()
			return self.uid_
	uid.__doc__=Task.uid.__doc__
	Task.uid=uid
def is_before(t1,t2):
	"""Return 1 when *t1* is constrained to run before *t2*, else 0.

	Ordering comes either from matching ext_out/ext_in extensions or from
	the before/after class-name lists.
	"""
	to_list=Utils.to_list
	if set(to_list(t2.ext_in))&set(to_list(t1.ext_out)):
		return 1
	name1=t1.__class__.__name__
	name2=t2.__class__.__name__
	if name1 in to_list(t2.after)or name2 in to_list(t1.before):
		return 1
	return 0
def set_file_constraints(tasks):
	"""Make every task that reads a node run after the task producing it."""
	ins=Utils.defaultdict(set)
	outs=Utils.defaultdict(set)
	for tsk in tasks:
		for node in getattr(tsk,'inputs',[])+getattr(tsk,'dep_nodes',[]):
			ins[id(node)].add(tsk)
		for node in getattr(tsk,'outputs',[]):
			outs[id(node)].add(tsk)
	# Nodes that are both produced and consumed create ordering links.
	for key in set(ins.keys())&set(outs.keys()):
		for consumer in ins[key]:
			consumer.run_after.update(outs[key])
def set_precedence_constraints(tasks):
	"""Apply before/after/ext_in/ext_out ordering between groups of tasks
	sharing the same constraint hash."""
	cstr_groups=Utils.defaultdict(list)
	for tsk in tasks:
		cstr_groups[tsk.hash_constraints()].append(tsk)
	keys=list(cstr_groups.keys())
	maxi=len(keys)
	for i in range(maxi):
		t1=cstr_groups[keys[i]][0]
		for j in range(i+1,maxi):
			t2=cstr_groups[keys[j]][0]
			# Compare one representative per group; all members share the
			# same constraints.
			if is_before(t1,t2):
				a,b=i,j
			elif is_before(t2,t1):
				a,b=j,i
			else:
				continue
			aval=set(cstr_groups[keys[a]])
			for tsk in cstr_groups[keys[b]]:
				tsk.run_after.update(aval)
def funex(c):
	"""Execute the code string *c* and return the function 'f' it defines."""
	namespace={}
	exec(c,namespace)
	return namespace['f']
# Boolean expressions inside ${VAR?cond} substitutions: variables, | and &
re_cond=re.compile('(?P<var>\w+)|(?P<or>\|)|(?P<and>&)')
# SRC/TGT followed by an expression suffix, e.g. 'SRC[0].parent'
re_novar=re.compile(r'^(SRC|TGT)\W+.*?$')
# run_str tokenizer: backslashes, $$ escapes and ${VAR...} substitutions
reg_act=re.compile(r'(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})',re.M)
def compile_fun_shell(line):
	"""Compile a shell-style run_str into a (function, env-variable-list)
	pair; the function formats the command and runs it through a shell."""
	extr=[]
	def repl(match):
		# Replace each ${VAR...} with a %s slot, recording (var, code).
		g=match.group
		if g('dollar'):
			return"$"
		elif g('backslash'):
			return'\\\\'
		elif g('subst'):
			extr.append((g('var'),g('code')))
			return"%s"
		return None
	line=reg_act.sub(repl,line)or line
	def replc(m):
		# Translate ${VAR?a|b&c} conditions into a python expression.
		if m.group('and'):
			return' and '
		elif m.group('or'):
			return' or '
		else:
			x=m.group('var')
			if x not in dvars:
				dvars.append(x)
			return'env[%r]'%x
	parm=[]
	dvars=[]
	app=parm.append
	for(var,meth)in extr:
		if var=='SRC':
			if meth:app('tsk.inputs%s'%meth)
			else:app('" ".join([a.path_from(cwdx) for a in tsk.inputs])')
		elif var=='TGT':
			if meth:app('tsk.outputs%s'%meth)
			else:app('" ".join([a.path_from(cwdx) for a in tsk.outputs])')
		elif meth:
			if meth.startswith(':'):
				if var not in dvars:
					dvars.append(var)
				m=meth[1:]
				if m=='SRC':
					m='[a.path_from(cwdx) for a in tsk.inputs]'
				elif m=='TGT':
					m='[a.path_from(cwdx) for a in tsk.outputs]'
				elif re_novar.match(m):
					# BUGFIX: this used to be two identical
					# 'elif re_novar.match(m)' branches, so the outputs
					# branch was unreachable and TGT-prefixed expressions
					# were wrongly mapped to tsk.inputs.
					if m.startswith('SRC'):
						m='[tsk.inputs%s]'%m[3:]
					else:
						m='[tsk.outputs%s]'%m[3:]
				elif m[:3]not in('tsk','gen','bld'):
					dvars.append(meth[1:])
					m='%r'%m
				app('" ".join(tsk.colon(%r, %s))'%(var,m))
			elif meth.startswith('?'):
				# Conditional substitution ${VAR?cond}
				expr=re_cond.sub(replc,meth[1:])
				app('p(%r) if (%s) else ""'%(var,expr))
			else:
				app('%s%s'%(var,meth))
		else:
			if var not in dvars:
				dvars.append(var)
			app("p('%s')"%var)
	if parm:parm="%% (%s) "%(',\n\t\t'.join(parm))
	else:parm=''
	c=COMPILE_TEMPLATE_SHELL%(line,parm)
	Logs.debug('action: %s',c.strip().splitlines())
	return(funex(c),dvars)
# no-shell run_str tokenizer: whitespace, ${VAR...} substitutions, plain words
reg_act_noshell=re.compile(r"(?P<space>\s+)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})|(?P<text>\S+)",re.M)
def compile_fun_noshell(line):
	"""Compile a run_str into a (function, env-variable-list) pair; the
	function builds an argv list and executes it without a shell."""
	buf=[]
	dvars=[]
	merge=False
	app=buf.append
	def replc(m):
		# Translate ${VAR?a|b&c} conditions into a python expression.
		if m.group('and'):
			return' and '
		elif m.group('or'):
			return' or '
		else:
			x=m.group('var')
			if x not in dvars:
				dvars.append(x)
			return'env[%r]'%x
	for m in reg_act_noshell.finditer(line):
		if m.group('space'):
			merge=False
			continue
		elif m.group('text'):
			app('[%r]'%m.group('text'))
		elif m.group('subst'):
			var=m.group('var')
			code=m.group('code')
			if var=='SRC':
				if code:
					app('[tsk.inputs%s]'%code)
				else:
					app('[a.path_from(cwdx) for a in tsk.inputs]')
			elif var=='TGT':
				if code:
					app('[tsk.outputs%s]'%code)
				else:
					app('[a.path_from(cwdx) for a in tsk.outputs]')
			elif code:
				if code.startswith(':'):
					if not var in dvars:
						dvars.append(var)
					m=code[1:]
					if m=='SRC':
						m='[a.path_from(cwdx) for a in tsk.inputs]'
					elif m=='TGT':
						m='[a.path_from(cwdx) for a in tsk.outputs]'
					elif re_novar.match(m):
						# BUGFIX: this used to be two identical
						# 'elif re_novar.match(m)' branches, so the outputs
						# branch was unreachable and TGT-prefixed
						# expressions were wrongly mapped to tsk.inputs.
						if m.startswith('SRC'):
							m='[tsk.inputs%s]'%m[3:]
						else:
							m='[tsk.outputs%s]'%m[3:]
					elif m[:3]not in('tsk','gen','bld'):
						dvars.append(m)
						m='%r'%m
					app('tsk.colon(%r, %s)'%(var,m))
				elif code.startswith('?'):
					# Conditional substitution ${VAR?cond}
					expr=re_cond.sub(replc,code[1:])
					app('to_list(env[%r] if (%s) else [])'%(var,expr))
				else:
					app('gen.to_list(%s%s)'%(var,code))
			else:
				app('to_list(env[%r])'%var)
				if not var in dvars:
					dvars.append(var)
			if merge:
				# Adjacent (non-space-separated) tokens join into one argv word.
				tmp='merge(%s, %s)'%(buf[-2],buf[-1])
				del buf[-1]
				buf[-1]=tmp
			merge=True
	buf=['lst.extend(%s)'%x for x in buf]
	fun=COMPILE_TEMPLATE_NOSHELL%"\n\t".join(buf)
	Logs.debug('action: %s',fun.strip().splitlines())
	return(funex(fun),dvars)
def compile_fun(line,shell=False):
	"""Turn a command string -- or a list of strings/functions -- into a
	(function, env-variable-list) pair usable as a task's run() method.

	Redirections or && in a string force shell execution.
	"""
	if isinstance(line,str):
		if line.find('<')>0 or line.find('>')>0 or line.find('&&')>0:
			shell=True
	else:
		dvars_lst=[]
		funs_lst=[]
		for x in line:
			if isinstance(x,str):
				fun,dvars=compile_fun(x,shell)
				dvars_lst+=dvars
				funs_lst.append(fun)
			else:
				# Assume a callable and let it through unchanged.
				funs_lst.append(x)
		def composed_fun(task):
			for x in funs_lst:
				ret=x(task)
				if ret:
					return ret
			return None
		# BUGFIX: this returned 'dvars', i.e. only the variables of the
		# last string sub-command (and a NameError when the list held no
		# strings at all); the accumulated dvars_lst is what callers need.
		return composed_fun,dvars_lst
	if shell:
		return compile_fun_shell(line)
	else:
		return compile_fun_noshell(line)
def task_factory(name,func=None,vars=None,color='GREEN',ext_in=None,ext_out=None,before=None,after=None,shell=False,scan=None):
	"""Create, register and return a new Task class called *name*.

	func may be a command string/tuple (stored as run_str and compiled by
	the metaclass) or a python function used as run(). The constraint
	parameters accept strings or lists; None behaves like an empty list
	(the previous mutable-list defaults were an anti-pattern).
	"""
	params={
		'vars':vars or[],
		'color':color,
		'name':name,
		'ext_in':Utils.to_list(ext_in or[]),
		'ext_out':Utils.to_list(ext_out or[]),
		'before':Utils.to_list(before or[]),
		'after':Utils.to_list(after or[]),
		'shell':shell,
		'scan':scan,
	}
	if isinstance(func,str)or isinstance(func,tuple):
		params['run_str']=func
	else:
		params['run']=func
	# type(Task) is the store_task_type metaclass, which also registers
	# the new class; no 'global' needed for the item assignment below.
	cls=type(Task)(name,(Task,),params)
	classes[name]=cls
	return cls
def always_run(cls):
	# Deprecated decorator: set the 'always_run' attribute on the task
	# class directly instead.
	Logs.warn('This decorator is deprecated, set always_run on the task class instead!')
	cls.always_run=True
	return cls
def update_outputs(cls):
	# No-op decorator kept for API compatibility -- presumably its old
	# behavior became the default (verify against waf history).
	return cls
|
iCarto/siga | refs/heads/master | extScripting/scripts/jython/Lib/xml/dom/Entity.py | 3 | ########################################################################
#
# File Name: Entity.py
#
# Documentation: http://docs.4suite.com/4DOM/Entity.py.html
#
"""
Implementation of DOM Level 2 Entity interface
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from xml.dom import Node
from DOMImplementation import implementation
from FtNode import FtNode
class Entity(FtNode):
    """DOM Level 2 Entity node: a (parsed or unparsed) entity declared in
    the DTD. publicId/systemId/notationName are exposed read-only."""
    nodeType = Node.ENTITY_NODE
    _allowedChildren = [Node.ELEMENT_NODE,
                        Node.PROCESSING_INSTRUCTION_NODE,
                        Node.COMMENT_NODE,
                        Node.TEXT_NODE,
                        Node.CDATA_SECTION_NODE,
                        Node.ENTITY_REFERENCE_NODE
                        ]
    def __init__(self, ownerDocument, publicId, systemId, notationName):
        FtNode.__init__(self, ownerDocument)
        # Written through __dict__, presumably to bypass attribute guarding
        # inherited from FtNode -- verify against FtNode.__setattr__.
        self.__dict__['__nodeName'] = '#entity'
        self.__dict__['publicId'] = publicId
        self.__dict__['systemId'] = systemId
        self.__dict__['notationName'] = notationName
    ### Attribute Methods ###
    # Accessors referenced by the computed-attribute tables below.
    def _get_systemId(self):
        return self.systemId
    def _get_publicId(self):
        return self.publicId
    def _get_notationName(self):
        return self.notationName
    ### Overridden Methods ###
    def __repr__(self):
        return '<Entity Node at %x: Public="%s" System="%s" Notation="%s">' % (
            id(self),
            self.publicId,
            self.systemId,
            self.notationName)
    ### Helper Functions For Cloning ###
    def _4dom_clone(self, owner):
        # Shallow clone attached to the new owner document.
        return self.__class__(owner,
                              self.publicId,
                              self.systemId,
                              self.notationName)
    def __getinitargs__(self):
        # Support copy/pickle by reproducing the constructor arguments.
        return (self.ownerDocument,
                self.publicId,
                self.systemId,
                self.notationName
                )
    ### Attribute Access Mappings ###
    _readComputedAttrs = FtNode._readComputedAttrs.copy()
    _readComputedAttrs.update({'publicId':_get_publicId,
                               'systemId':_get_systemId,
                               'notationName':_get_notationName
                               })
    _writeComputedAttrs = FtNode._writeComputedAttrs.copy()
    # Create the read-only list of attributes
    _readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
                            FtNode._readOnlyAttrs + _readComputedAttrs.keys())
|
drglove/SickRage | refs/heads/master | lib/requests/packages/urllib3/util/retry.py | 699 | import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
    """ Retry configuration.

    Each retry attempt will create a new Retry object with updated values, so
    they can be safely reused.

    Retries can be defined as a default for a pool::

        retries = Retry(connect=5, read=2, redirect=5)
        http = PoolManager(retries=retries)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', retries=Retry(10))

    Retries can be disabled by passing ``False``::

        response = http.request('GET', 'http://example.com/', retries=False)

    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
    retries are disabled, in which case the causing exception will be raised.

    :param int total:
        Total number of retries to allow. Takes precedence over other counts.

        Set to ``None`` to remove this constraint and fall back on other
        counts. It's a good idea to set this to some sensibly-high value to
        account for unexpected edge cases and avoid infinite retry loops.

        Set to ``0`` to fail on the first retry.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param int connect:
        How many connection-related errors to retry on.

        These are errors raised before the request is sent to the remote server,
        which we assume has not triggered the server to process the request.

        Set to ``0`` to fail on the first retry of this type.

    :param int read:
        How many times to retry on read errors.

        These errors are raised after the request was sent to the server, so the
        request may have side-effects.

        Set to ``0`` to fail on the first retry of this type.

    :param int redirect:
        How many redirects to perform. Limit this to avoid infinite redirect
        loops.

        A redirect is a HTTP response with a status code 301, 302, 303, 307 or
        308.

        Set to ``0`` to fail on the first retry of this type.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param iterable method_whitelist:
        Set of uppercased HTTP method verbs that we should retry on.

        By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
        same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.

    :param iterable status_forcelist:
        A set of HTTP status codes that we should force a retry on.

        By default, this is disabled with ``None``.

    :param float backoff_factor:
        A backoff factor to apply between attempts. urllib3 will sleep for::

            {backoff factor} * (2 ^ ({number of total retries} - 1))

        seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
        for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
        than :attr:`Retry.BACKOFF_MAX`.

        By default, backoff is disabled (set to 0).

    :param bool raise_on_redirect: Whether, if the number of redirects is
        exhausted, to raise a MaxRetryError, or to return a response with a
        response code in the 3xx range.
    """

    DEFAULT_METHOD_WHITELIST = frozenset([
        'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])

    #: Maximum backoff time.
    BACKOFF_MAX = 120

    def __init__(self, total=10, connect=None, read=None, redirect=None,
                 method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
                 backoff_factor=0, raise_on_redirect=True, _observed_errors=0):

        self.total = total
        self.connect = connect
        self.read = read

        # ``redirect=False`` (or disabling everything via ``total=False``)
        # means "don't follow redirects, and don't error about it".
        if redirect is False or total is False:
            redirect = 0
            raise_on_redirect = False

        self.redirect = redirect
        self.status_forcelist = status_forcelist or set()
        self.method_whitelist = method_whitelist
        self.backoff_factor = backoff_factor
        self.raise_on_redirect = raise_on_redirect
        self._observed_errors = _observed_errors  # TODO: use .history instead?

    def new(self, **kw):
        """Return a copy of this Retry with the given fields replaced."""
        params = dict(
            total=self.total,
            connect=self.connect, read=self.read, redirect=self.redirect,
            method_whitelist=self.method_whitelist,
            status_forcelist=self.status_forcelist,
            backoff_factor=self.backoff_factor,
            raise_on_redirect=self.raise_on_redirect,
            _observed_errors=self._observed_errors,
        )
        params.update(kw)
        return type(self)(**params)

    @classmethod
    def from_int(cls, retries, redirect=True, default=None):
        """ Backwards-compatibility for the old retries format."""
        if retries is None:
            retries = default if default is not None else cls.DEFAULT

        if isinstance(retries, Retry):
            return retries

        # ``redirect=True`` maps to "no explicit redirect limit" (None);
        # ``redirect=False`` disables redirects in __init__.
        redirect = bool(redirect) and None
        new_retries = cls(retries, redirect=redirect)
        # Lazy %-style args: avoids formatting when debug logging is off.
        log.debug("Converted retries value: %r -> %r", retries, new_retries)
        return new_retries

    def get_backoff_time(self):
        """ Formula for computing the current backoff

        :rtype: float
        """
        # No backoff until at least two errors have been observed.
        if self._observed_errors <= 1:
            return 0

        backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
        return min(self.BACKOFF_MAX, backoff_value)

    def sleep(self):
        """ Sleep between retry attempts using an exponential backoff.

        By default, the backoff factor is 0 and this method will return
        immediately.
        """
        backoff = self.get_backoff_time()
        if backoff <= 0:
            return
        time.sleep(backoff)

    def _is_connection_error(self, err):
        """ Errors when we're fairly sure that the server did not receive the
        request, so it should be safe to retry.
        """
        return isinstance(err, ConnectTimeoutError)

    def _is_read_error(self, err):
        """ Errors that occur after the request has been started, so we should
        assume that the server began processing it.
        """
        return isinstance(err, (ReadTimeoutError, ProtocolError))

    def is_forced_retry(self, method, status_code):
        """ Is this method/status code retryable? (Based on method/codes whitelists)
        """
        if self.method_whitelist and method.upper() not in self.method_whitelist:
            return False

        # bool(): previously this returned the (possibly empty) forcelist
        # itself rather than a proper boolean.
        return bool(self.status_forcelist and status_code in self.status_forcelist)

    def is_exhausted(self):
        """ Are we out of retries? """
        retry_counts = (self.total, self.connect, self.read, self.redirect)
        # Drop only disabled (None) counters.  The previous
        # ``filter(None, ...)`` also discarded 0, which happened to give
        # the same result only because ``0 < 0`` is False.
        retry_counts = [x for x in retry_counts if x is not None]
        if not retry_counts:
            return False

        return min(retry_counts) < 0

    def increment(self, method=None, url=None, response=None, error=None,
                  _pool=None, _stacktrace=None):
        """ Return a new Retry object with incremented retry counters.

        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.HTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.

        :return: A new ``Retry`` object.
        """
        if self.total is False and error:
            # Disabled, indicate to re-raise the error.
            raise six.reraise(type(error), error, _stacktrace)

        total = self.total
        if total is not None:
            total -= 1

        _observed_errors = self._observed_errors
        connect = self.connect
        read = self.read
        redirect = self.redirect
        cause = 'unknown'

        if error and self._is_connection_error(error):
            # Connect retry?
            if connect is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif connect is not None:
                connect -= 1
            _observed_errors += 1

        elif error and self._is_read_error(error):
            # Read retry?
            if read is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif read is not None:
                read -= 1
            _observed_errors += 1

        elif response and response.get_redirect_location():
            # Redirect retry?
            if redirect is not None:
                redirect -= 1
            cause = 'too many redirects'

        else:
            # Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
            _observed_errors += 1
            cause = ResponseError.GENERIC_ERROR
            if response and response.status:
                cause = ResponseError.SPECIFIC_ERROR.format(
                    status_code=response.status)

        new_retry = self.new(
            total=total,
            connect=connect, read=read, redirect=redirect,
            _observed_errors=_observed_errors)

        if new_retry.is_exhausted():
            raise MaxRetryError(_pool, url, error or ResponseError(cause))

        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)

        return new_retry

    def __repr__(self):
        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
                'read={self.read}, redirect={self.redirect})').format(
                    cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
#: Policy used by Retry.from_int() when no retries value is supplied
#: (three total retries).
Retry.DEFAULT = Retry(3)
|
edhuckle/statsmodels | refs/heads/master | statsmodels/nonparametric/_kernel_base.py | 29 | """
Module containing the base object for multivariate kernel density and
regression, plus some utilities.
"""
from statsmodels.compat.python import range, string_types
import copy
import numpy as np
from scipy import optimize
from scipy.stats.mstats import mquantiles
try:
import joblib
has_joblib = True
except ImportError:
has_joblib = False
from . import kernels
#: Registry mapping kernel-name strings (as passed via the ``ckertype`` /
#: ``okertype`` / ``ukertype`` options, see ``gpke``) to implementations in
#: :mod:`kernels`.  Covers the plain, regression, convolution, cdf and
#: derivative variants of the continuous (gaussian), ordered (wangryzin)
#: and unordered (aitchisonaitken) kernels.
kernel_func = dict(wangryzin=kernels.wang_ryzin,
                   aitchisonaitken=kernels.aitchison_aitken,
                   gaussian=kernels.gaussian,
                   aitchison_aitken_reg = kernels.aitchison_aitken_reg,
                   wangryzin_reg = kernels.wang_ryzin_reg,
                   gauss_convolution=kernels.gaussian_convolution,
                   wangryzin_convolution=kernels.wang_ryzin_convolution,
                   aitchisonaitken_convolution=kernels.aitchison_aitken_convolution,
                   gaussian_cdf=kernels.gaussian_cdf,
                   aitchisonaitken_cdf=kernels.aitchison_aitken_cdf,
                   wangryzin_cdf=kernels.wang_ryzin_cdf,
                   d_gaussian=kernels.d_gaussian)
def _compute_min_std_IQR(data):
"""Compute minimum of std and IQR for each variable."""
s1 = np.std(data, axis=0)
q75 = mquantiles(data, 0.75, axis=0).data[0]
q25 = mquantiles(data, 0.25, axis=0).data[0]
s2 = (q75 - q25) / 1.349 # IQR
dispersion = np.minimum(s1, s2)
return dispersion
def _compute_subset(class_type, data, bw, co, do, n_cvars, ix_ord,
                    ix_unord, n_sub, class_vars, randomize, bound):
    """Compute bw on subset of data.

    Called from ``GenericKDE._compute_efficient_*``.

    Fits one of KDEMultivariate / KDEMultivariateConditional / KernelReg
    (selected by the ``class_type`` string) on either a random sub-sample of
    size ``n_sub`` (``randomize=True``) or the slice ``bound[0]:bound[1]``
    of ``data``, and returns ``(sample_scale_sub, bw_sub)``.

    Notes
    -----
    Needs to be outside the class in order for joblib to be able to pickle it.
    """
    if randomize:
        # NOTE: shuffles the caller's array in place (caller passes a copy).
        np.random.shuffle(data)
        sub_data = data[:n_sub, :]
    else:
        sub_data = data[bound[0]:bound[1], :]
    if class_type == 'KDEMultivariate':
        from .kernel_density import KDEMultivariate
        var_type = class_vars[0]
        sub_model = KDEMultivariate(sub_data, var_type, bw=bw,
                        defaults=EstimatorSettings(efficient=False))
    elif class_type == 'KDEMultivariateConditional':
        from .kernel_density import KDEMultivariateConditional
        k_dep, dep_type, indep_type = class_vars
        # First k_dep columns are the dependent variables.
        endog = sub_data[:, :k_dep]
        exog = sub_data[:, k_dep:]
        sub_model = KDEMultivariateConditional(endog, exog, dep_type,
            indep_type, bw=bw, defaults=EstimatorSettings(efficient=False))
    elif class_type == 'KernelReg':
        from .kernel_regression import KernelReg
        var_type, k_vars, reg_type = class_vars
        # Column 0 is the response; the rest are regressors.
        endog = _adjust_shape(sub_data[:, 0], 1)
        exog = _adjust_shape(sub_data[:, 1:], k_vars)
        sub_model = KernelReg(endog=endog, exog=exog, reg_type=reg_type,
                              var_type=var_type, bw=bw,
                              defaults=EstimatorSettings(efficient=False))
    else:
        raise ValueError("class_type not recognized, should be one of " \
                 "{KDEMultivariate, KDEMultivariateConditional, KernelReg}")

    # Compute dispersion in next 4 lines
    if class_type == 'KernelReg':
        sub_data = sub_data[:, 1:]

    dispersion = _compute_min_std_IQR(sub_data)
    # Scaling-factor normalization: co/do are 2x the kernel order for
    # continuous/discrete variables respectively.
    fct = dispersion * n_sub**(-1. / (n_cvars + co))
    fct[ix_unord] = n_sub**(-2. / (n_cvars + do))
    fct[ix_ord] = n_sub**(-2. / (n_cvars + do))
    sample_scale_sub = sub_model.bw / fct  #TODO: check if correct
    bw_sub = sub_model.bw
    return sample_scale_sub, bw_sub
class GenericKDE (object):
    """
    Base class for density estimation and regression KDE classes.

    Provides bandwidth selection (rule of thumb and cross-validation) and
    the sub-sample "efficient" estimation machinery shared by
    `KDEMultivariate`, `KDEMultivariateConditional` and `KernelReg`.
    Subclasses must define ``self.data``, ``self.data_type``, ``self.nobs``,
    ``self.k_vars`` and implement ``loo_likelihood`` / ``imse``.
    """
    def _compute_bw(self, bw):
        """
        Computes the bandwidth of the data.

        Parameters
        ----------
        bw: array_like or str
            If array_like: user-specified bandwidth.
            If a string, should be one of:

                - cv_ml: cross validation maximum likelihood
                - normal_reference: normal reference rule of thumb
                - cv_ls: cross validation least squares

        Notes
        -----
        The default value for bw is 'normal_reference'.
        """
        self.bw_func = dict(normal_reference=self._normal_reference,
                            cv_ml=self._cv_ml, cv_ls=self._cv_ls)
        if bw is None:
            bwfunc = self.bw_func['normal_reference']
            return bwfunc()

        if not isinstance(bw, string_types):
            # Array-like: take the user's bandwidths as given.
            self._bw_method = "user-specified"
            res = np.asarray(bw)
        else:
            # The user specified a bandwidth selection method
            self._bw_method = bw
            bwfunc = self.bw_func[bw]
            res = bwfunc()

        return res

    def _compute_dispersion(self, data):
        """
        Computes the measure of dispersion.

        The minimum of the standard deviation and interquartile range / 1.349

        Notes
        -----
        Reimplemented in `KernelReg`, because the first column of `data` has to
        be removed.

        References
        ----------
        See the user guide for the np package in R.
        In the notes on bwscaling option in npreg, npudens, npcdens there is
        a discussion on the measure of dispersion
        """
        return _compute_min_std_IQR(data)

    def _get_class_vars_type(self):
        """Helper method to be able to pass needed vars to _compute_subset.

        Needs to be implemented by subclasses."""
        pass

    def _compute_efficient(self, bw):
        """
        Computes the bandwidth by estimating the scaling factor (c)
        in n_res resamples of size ``n_sub`` (in `randomize` case), or by
        dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
        `randomize` is False).

        References
        ----------
        See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
        """
        if bw is None:
            self._bw_method = 'normal_reference'
        if isinstance(bw, string_types):
            self._bw_method = bw
        else:
            # NOTE(review): when ``bw is None`` this branch is also taken and
            # the method returns None without estimating anything — looks
            # suspicious; confirm callers never pass None here.
            self._bw_method = "user-specified"
            return bw

        nobs = self.nobs
        n_sub = self.n_sub
        # deepcopy: _compute_subset may shuffle the data in place.
        data = copy.deepcopy(self.data)
        n_cvars = self.data_type.count('c')
        co = 4  # 2*order of continuous kernel
        do = 4  # 2*order of discrete kernel
        _, ix_ord, ix_unord = _get_type_pos(self.data_type)

        # Define bounds for slicing the data
        if self.randomize:
            # randomize chooses blocks of size n_sub, independent of nobs
            bounds = [None] * self.n_res
        else:
            bounds = [(i * n_sub, (i+1) * n_sub) for i in range(nobs // n_sub)]
            if nobs % n_sub > 0:
                # Remainder observations form one final (smaller) block.
                bounds.append((nobs - nobs % n_sub, nobs))

        n_blocks = self.n_res if self.randomize else len(bounds)
        sample_scale = np.empty((n_blocks, self.k_vars))
        only_bw = np.empty((n_blocks, self.k_vars))

        class_type, class_vars = self._get_class_vars_type()
        if has_joblib:
            # `res` is a list of tuples (sample_scale_sub, bw_sub)
            res = joblib.Parallel(n_jobs=self.n_jobs) \
                (joblib.delayed(_compute_subset) \
                (class_type, data, bw, co, do, n_cvars, ix_ord, ix_unord, \
                n_sub, class_vars, self.randomize, bounds[i]) \
                for i in range(n_blocks))
        else:
            res = []
            for i in range(n_blocks):
                res.append(_compute_subset(class_type, data, bw, co, do,
                                           n_cvars, ix_ord, ix_unord, n_sub,
                                           class_vars, self.randomize,
                                           bounds[i]))

        for i in range(n_blocks):
            sample_scale[i, :] = res[i][0]
            only_bw[i, :] = res[i][1]

        s = self._compute_dispersion(data)
        order_func = np.median if self.return_median else np.mean
        m_scale = order_func(sample_scale, axis=0)
        # TODO: Check if 1/5 is correct in line below!
        # Rescale the aggregated scaling factor to the full-sample size.
        bw = m_scale * s * nobs**(-1. / (n_cvars + co))
        bw[ix_ord] = m_scale[ix_ord] * nobs**(-2./ (n_cvars + do))
        bw[ix_unord] = m_scale[ix_unord] * nobs**(-2./ (n_cvars + do))

        if self.return_only_bw:
            bw = np.median(only_bw, axis=0)

        return bw

    def _set_defaults(self, defaults):
        """Sets the default values for the efficient estimation"""
        self.n_res = defaults.n_res
        self.n_sub = defaults.n_sub
        self.randomize = defaults.randomize
        self.return_median = defaults.return_median
        self.efficient = defaults.efficient
        self.return_only_bw = defaults.return_only_bw
        self.n_jobs = defaults.n_jobs

    def _normal_reference(self):
        """
        Returns Scott's normal reference rule of thumb bandwidth parameter.

        Notes
        -----
        See p.13 in [2] for an example and discussion. The formula for the
        bandwidth is

        .. math:: h = 1.06n^{-1/(4+q)}

        where ``n`` is the number of observations and ``q`` is the number of
        variables.
        """
        X = np.std(self.data, axis=0)
        return 1.06 * X * self.nobs ** (- 1. / (4 + self.data.shape[1]))

    def _set_bw_bounds(self, bw):
        """
        Sets bandwidth lower bound to effectively zero (1e-10), and for
        discrete values upper bound to 1.
        """
        bw[bw < 0] = 1e-10
        _, ix_ord, ix_unord = _get_type_pos(self.data_type)
        bw[ix_ord] = np.minimum(bw[ix_ord], 1.)
        bw[ix_unord] = np.minimum(bw[ix_unord], 1.)

        return bw

    def _cv_ml(self):
        r"""
        Returns the cross validation maximum likelihood bandwidth parameter.

        Notes
        -----
        For more details see p.16, 18, 27 in Ref. [1] (see module docstring).

        Returns the bandwidth estimate that maximizes the leave-out-out
        likelihood. The leave-one-out log likelihood function is:

        .. math:: \ln L=\sum_{i=1}^{n}\ln f_{-i}(X_{i})

        The leave-one-out kernel estimator of :math:`f_{-i}` is:

        .. math:: f_{-i}(X_{i})=\frac{1}{(n-1)h}
                        \sum_{j=1,j\neq i}K_{h}(X_{i},X_{j})

        where :math:`K_{h}` represents the Generalized product kernel
        estimator:

        .. math:: K_{h}(X_{i},X_{j})=\prod_{s=1}^
                        {q}h_{s}^{-1}k\left(\frac{X_{is}-X_{js}}{h_{s}}\right)
        """
        # the initial value for the optimization is the normal_reference
        h0 = self._normal_reference()
        bw = optimize.fmin(self.loo_likelihood, x0=h0, args=(np.log, ),
                           maxiter=1e3, maxfun=1e3, disp=0, xtol=1e-3)
        bw = self._set_bw_bounds(bw)  # bound bw if necessary
        return bw

    def _cv_ls(self):
        r"""
        Returns the cross-validation least squares bandwidth parameter(s).

        Notes
        -----
        For more details see pp. 16, 27 in Ref. [1] (see module docstring).

        Returns the value of the bandwidth that maximizes the integrated mean
        square error between the estimated and actual distribution. The
        integrated mean square error (IMSE) is given by:

        .. math:: \int\left[\hat{f}(x)-f(x)\right]^{2}dx

        This is the general formula for the IMSE. The IMSE differs for
        conditional (``KDEMultivariateConditional``) and unconditional
        (``KDEMultivariate``) kernel density estimation.
        """
        h0 = self._normal_reference()
        bw = optimize.fmin(self.imse, x0=h0, maxiter=1e3, maxfun=1e3, disp=0,
                           xtol=1e-3)
        bw = self._set_bw_bounds(bw)  # bound bw if necessary
        return bw

    def loo_likelihood(self):
        """Leave-one-out likelihood; must be implemented by subclasses."""
        raise NotImplementedError
class EstimatorSettings(object):
    """Options bundle controlling bandwidth estimation for the kernel
    classes (`KDEMultivariate`, `KDEMultivariateConditional`, `KernelReg`,
    `CensoredKernelReg`).

    Parameters
    ----------
    efficient : bool, optional
        Estimate the bandwidth from sub-samples (scaling-factor approach)
        instead of the full sample; useful for large samples (nobs >> 300)
        and/or many variables (k_vars > 3).  Default is False, meaning all
        data is used at once.
    randomize : bool, optional
        When True, draw `n_res` random re-samples (with replacement) of
        size `n_sub` from the full sample.  When False (default), slice
        the full sample into consecutive sub-samples of size `n_sub` so
        that every observation is used once.
    n_sub : int, optional
        Size of the sub-samples.  Default is 50.
    n_res : int, optional
        Number of random re-samples used to estimate the bandwidth; only
        has an effect when ``randomize == True``.  Default is 25.
    return_median : bool, optional
        Aggregate the per-sub-sample scaling factors with the median
        (True, default) or the mean (False).
    return_only_bw : bool, optional
        Use the raw sub-sample bandwidths rather than the scaling factor.
        This is *not* theoretically justified; for experimentation only.
    n_jobs : int, optional
        Number of jobs for parallel estimation with ``joblib.Parallel``.
        Default is -1, meaning ``n_cores - 1``.  See the `joblib
        documentation <https://pythonhosted.org/joblib/parallel.html>`_
        for details.

    Examples
    --------
    >>> settings = EstimatorSettings(randomize=True, n_jobs=3)
    >>> k_dens = KDEMultivariate(data, var_type, defaults=settings)
    """
    def __init__(self, efficient=False, randomize=False, n_res=25, n_sub=50,
                 return_median=True, return_only_bw=False, n_jobs=-1):
        # Plain record type: simply keep every option as an attribute.
        self.n_jobs = n_jobs
        self.n_res = n_res
        self.n_sub = n_sub
        self.efficient = efficient
        self.randomize = randomize
        self.return_median = return_median
        self.return_only_bw = return_only_bw  # TODO: remove this?
class LeaveOneOut(object):
    """
    Generator to give leave-one-out views on X.

    Parameters
    ----------
    X : array-like
        2-D array.

    Examples
    --------
    >>> X = np.random.normal(0, 1, [10,2])
    >>> loo = LeaveOneOut(X)
    >>> for x in loo:
    ...    print(x)

    Notes
    -----
    A little lighter weight than sklearn LOO. We don't need test index.
    Also passes views on X, not the index.
    """
    def __init__(self, X):
        self.X = np.asarray(X)

    def __iter__(self):
        X = self.X
        nobs, k_vars = np.shape(X)

        for i in range(nobs):
            # dtype=bool (builtin): the ``np.bool`` alias was deprecated in
            # NumPy 1.20 and removed in 1.24; plain ``bool`` is equivalent
            # on all versions.
            index = np.ones(nobs, dtype=bool)
            index[i] = False
            yield X[index, :]
def _get_type_pos(var_type):
ix_cont = np.array([c == 'c' for c in var_type])
ix_ord = np.array([c == 'o' for c in var_type])
ix_unord = np.array([c == 'u' for c in var_type])
return ix_cont, ix_ord, ix_unord
def _adjust_shape(dat, k_vars):
""" Returns an array of shape (nobs, k_vars) for use with `gpke`."""
dat = np.asarray(dat)
if dat.ndim > 2:
dat = np.squeeze(dat)
if dat.ndim == 1 and k_vars > 1: # one obs many vars
nobs = 1
elif dat.ndim == 1 and k_vars == 1: # one obs one var
nobs = len(dat)
else:
if np.shape(dat)[0] == k_vars and np.shape(dat)[1] != k_vars:
dat = dat.T
nobs = np.shape(dat)[0] # ndim >1 so many obs many vars
dat = np.reshape(dat, (nobs, k_vars))
return dat
def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
         okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
    r"""
    Returns the non-normalized Generalized Product Kernel Estimator

    Parameters
    ----------
    bw: 1-D ndarray
        The user-specified bandwidth parameters.
    data: 1D or 2-D ndarray
        The training data.
    data_predict: 1-D ndarray
        The evaluation points at which the kernel estimation is performed.
    var_type: str, optional
        The variable type (continuous, ordered, unordered).
    ckertype: str, optional
        The kernel used for the continuous variables.
    okertype: str, optional
        The kernel used for the ordered discrete variables.
    ukertype: str, optional
        The kernel used for the unordered discrete variables.
    tosum : bool, optional
        Whether or not to sum the calculated array of densities. Default is
        True.

    Returns
    -------
    dens: array-like
        The generalized product kernel density estimator.

    Notes
    -----
    The formula for the multivariate kernel estimator for the pdf is:

    .. math:: f(x)=\frac{1}{nh_{1}...h_{q}}\sum_{i=1}^
                    {n}K\left(\frac{X_{i}-x}{h}\right)

    where

    .. math:: K\left(\frac{X_{i}-x}{h}\right) =
                k\left( \frac{X_{i1}-x_{1}}{h_{1}}\right)\times
                k\left( \frac{X_{i2}-x_{2}}{h_{2}}\right)\times...\times
                k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right)
    """
    # NOTE: the docstring is now a raw string — previously ``\frac`` embedded
    # a literal form-feed (\f) character, corrupting the rendered help.
    kertypes = dict(c=ckertype, o=okertype, u=ukertype)

    # Evaluate the per-variable kernel, picking the implementation for each
    # variable's type from the module-level ``kernel_func`` registry.
    Kval = np.empty(data.shape)
    for ii, vtype in enumerate(var_type):
        func = kernel_func[kertypes[vtype]]
        Kval[:, ii] = func(bw[ii], data[:, ii], data_predict[ii])

    # Product over variables; only the continuous bandwidths enter the
    # normalization (the division uses bw of the 'c' variables only).
    iscontinuous = np.array([c == 'c' for c in var_type])
    dens = Kval.prod(axis=1) / np.prod(bw[iscontinuous])
    if tosum:
        return dens.sum(axis=0)
    else:
        return dens
|
tedelhourani/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_functionapp_facts.py | 44 | #!/usr/bin/python
#
# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_functionapp_facts
version_added: "2.4"
short_description: Get Azure Function App facts
description:
- Get facts for one Azure Function App or all Function Apps within a resource group
options:
name:
description:
- Only show results for a specific Function App
required: false
default: null
resource_group:
description:
- Limit results to a resource group. Required when filtering by name
required: false
default: null
aliases:
- resource_group_name
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
required: false
default: null
extends_documentation_fragment:
- azure
author:
- "Thomas Stringer (@tstringer)"
'''
EXAMPLES = '''
- name: Get facts for one Function App
azure_rm_functionapp_facts:
resource_group: ansible-rg
name: myfunctionapp
- name: Get facts for all Function Apps in a resource group
azure_rm_functionapp_facts:
resource_group: ansible-rg
- name: Get facts for all Function Apps by tags
azure_rm_functionapp_facts:
tags:
- testing
'''
RETURN = '''
azure_functionapps:
description: List of Azure Function Apps dicts
returned: always
type: list
example:
id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp
name: myfunctionapp
kind: functionapp
location: East US
type: Microsoft.Web/sites
state: Running
host_names:
- myfunctionapp.azurewebsites.net
repository_site_name: myfunctionapp
usage_state: Normal
enabled: true
enabled_host_names:
- myfunctionapp.azurewebsites.net
- myfunctionapp.scm.azurewebsites.net
availability_state: Normal
host_name_ssl_states:
- name: myfunctionapp.azurewebsites.net
ssl_state: Disabled
host_type: Standard
- name: myfunctionapp.scm.azurewebsites.net
ssl_state: Disabled
host_type: Repository
server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan
reserved: false
last_modified_time_utc: 2017-08-22T18:54:01.190Z
scm_site_also_stopped: false
client_affinity_enabled: true
client_cert_enabled: false
host_names_disabled: false
outbound_ip_addresses: ............
container_size: 1536
daily_memory_time_quota: 0
resource_group: ansible-rg
default_host_name: myfunctionapp.azurewebsites.net
'''
try:
from msrestazure.azure_exceptions import CloudError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
class AzureRMFunctionAppFacts(AzureRMModuleBase):
    """Facts module: collect Azure Function App properties, optionally
    filtered by name, resource group and/or tags."""

    def __init__(self):
        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str', aliases=['resource_group_name']),
            tags=dict(type='list'),
        )

        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_functionapps=[])
        )

        # Populated from module parameters in exec_module().
        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMFunctionAppFacts, self).__init__(
            self.module_arg_spec,
            supports_tags=False,
            facts_module=True
        )

    def exec_module(self, **kwargs):
        """Dispatch to the narrowest listing matching the given filters and
        return the results dict."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            self.results['ansible_facts']['azure_functionapps'] = self.get_functionapp()
        elif self.resource_group:
            self.results['ansible_facts']['azure_functionapps'] = self.list_resource_group()
        else:
            self.results['ansible_facts']['azure_functionapps'] = self.list_all()

        return self.results

    def get_functionapp(self):
        """Fetch a single Function App by resource group + name; returns a
        one-element list (or [] inside the list if not found/tag-filtered)."""
        self.log('Get properties for Function App {0}'.format(self.name))
        function_app = None
        result = []

        try:
            function_app = self.web_client.web_apps.get(
                self.resource_group,
                self.name
            )
        except CloudError:
            # Not found: fall through and return the empty result.
            pass

        if function_app and self.has_tags(function_app.tags, self.tags):
            result = function_app.as_dict()

        return [result]

    def list_resource_group(self):
        """List all Function Apps in self.resource_group, tag-filtered."""
        self.log('List items')
        try:
            response = self.web_client.web_apps.list_by_resource_group(self.resource_group)
        except Exception as exc:
            self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item.as_dict())
        return results

    def list_all(self):
        """List all Function Apps in the subscription, tag-filtered."""
        self.log('List all items')
        try:
            # BUG FIX: this path is only reached when no resource_group was
            # given, yet it previously called
            # list_by_resource_group(self.resource_group) with None.
            # Use the subscription-wide listing instead.
            response = self.web_client.web_apps.list()
        except Exception as exc:
            self.fail("Error listing all items - {0}".format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item.as_dict())
        return results
def main():
    """Module entry point: instantiating the facts class runs the module
    (argument parsing and exec_module are driven by the base class)."""
    AzureRMFunctionAppFacts()

if __name__ == '__main__':
    main()
|
chongtianfeiyu/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/ctypes/test/__init__.py | 101 | import os
import unittest
from test import support
# skip tests if _ctypes was not built
ctypes = support.import_module('ctypes')
ctypes_symbols = dir(ctypes)
def need_symbol(name):
    """Test decorator: skip unless *name* is exported by :mod:`ctypes`
    (checked against the ``dir(ctypes)`` snapshot taken at import time)."""
    return unittest.skipUnless(name in ctypes_symbols,
                               '{!r} is required'.format(name))
def load_tests(*args):
    """Standard unittest ``load_tests`` hook: discover all tests in this
    package directory via test.support."""
    return support.load_package_tests(os.path.dirname(__file__), *args)
|
jaysonsantos/servo | refs/heads/master | tests/wpt/css-tests/tools/manifest/vcs.py | 287 | import os
import subprocess
def get_git_func(repo_path):
    """Return a callable ``git(cmd, *args)`` that runs the given git
    subcommand inside *repo_path* and returns its output, with stderr
    folded into stdout."""
    def git(cmd, *args):
        invocation = ["git", cmd]
        invocation.extend(args)
        return subprocess.check_output(invocation, cwd=repo_path,
                                       stderr=subprocess.STDOUT)
    return git
def is_git_repo(tests_root):
    """Report whether *tests_root* looks like a git checkout, i.e. contains
    a ``.git`` entry (directory or gitdir file)."""
    git_entry = os.path.join(tests_root, ".git")
    return os.path.exists(git_entry)
#: Module-level cache for the repository root path (computed lazily).
_repo_root = None

def get_repo_root(initial_dir=None):
    """Return (and cache) the toplevel directory of the git checkout that
    contains *initial_dir* (default: this file's directory).

    Note: the cache is global, so the *initial_dir* of the first call wins
    for all subsequent calls.
    """
    global _repo_root
    if initial_dir is None:
        initial_dir = os.path.dirname(__file__)
    if _repo_root is None:
        git = get_git_func(initial_dir)
        # `git rev-parse --show-toplevel` prints the repo root; strip the
        # trailing newline.  NOTE(review): under Python 3, check_output
        # returns bytes, so this is a bytes path — presumably callers
        # accept that; confirm.
        _repo_root = git("rev-parse", "--show-toplevel").rstrip()
    return _repo_root
|
Canpio/Paddle | refs/heads/develop | python/paddle/fluid/tests/unittests/test_recordio_reader.py | 3 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid as fluid
import paddle.v2 as paddle
import paddle.v2.dataset.mnist as mnist
class TestRecordIO(unittest.TestCase):
    """Exercise Paddle Fluid's recordio reader: plain reading, shuffle and
    double-buffer decorators, via a small MNIST training loop."""
    def setUp(self):
        # Convert mnist to recordio file
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            reader = paddle.batch(mnist.train(), batch_size=32)
            feeder = fluid.DataFeeder(
                feed_list=[ # order is image and label
                    fluid.layers.data(
                        name='image', shape=[784]),
                    fluid.layers.data(
                        name='label', shape=[1], dtype='int64'),
                ],
                place=fluid.CPUPlace())
            # num_batches is used later to verify one full pass was read.
            self.num_batches = fluid.recordio_writer.convert_reader_to_recordio_file(
                './mnist.recordio', reader, feeder)
    def test_main(self, decorator_callback=None):
        # use new program
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data_file = fluid.layers.open_recordio_file(
                './mnist.recordio',
                shapes=[[-1, 784], [-1, 1]],
                lod_levels=[0, 0],
                dtypes=['float32', 'int64'])
            # Optionally wrap the reader (shuffle / double-buffer tests).
            if decorator_callback is not None:
                data_file = decorator_callback(data_file)
            img, label = fluid.layers.read_file(data_file)
            # Minimal classifier: fc -> softmax, cross-entropy loss.
            hidden = fluid.layers.fc(input=img, size=100, act='tanh')
            prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
            loss = fluid.layers.cross_entropy(input=prediction, label=label)
            avg_loss = fluid.layers.mean(loss)
            fluid.optimizer.Adam(learning_rate=1e-3).minimize(avg_loss)
            if fluid.core.is_compiled_with_cuda():
                place = fluid.CUDAPlace(0)
            else:
                place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            avg_loss_np = []
            # train a pass
            batch_id = 0
            while True:
                try:
                    tmp, = exe.run(fetch_list=[avg_loss])
                except fluid.core.EnforceNotMet as ex:
                    # End of data is signalled via an exception.
                    # NOTE(review): ``ex.message`` is a Python 2 idiom —
                    # presumably EnforceNotMet defines it explicitly; confirm
                    # under Python 3.
                    self.assertIn("There is no next data.", ex.message)
                    break
                avg_loss_np.append(tmp)
                batch_id += 1
            # One full pass was consumed and the loss decreased.
            self.assertEqual(batch_id, self.num_batches)
            self.assertLess(avg_loss_np[-1], avg_loss_np[0])
    def test_shuffle_reader(self):
        self.test_main(decorator_callback=lambda reader: fluid.layers.io.shuffle(
            reader, buffer_size=200))
    def test_double_buffer_reader(self):
        self.test_main(decorator_callback=lambda reader: fluid.layers.io.double_buffer(reader,
            place='cuda:0' if fluid.core.is_compiled_with_cuda() else 'cpu'))
|
antotodd/project2 | refs/heads/master | lib/flask/testsuite/test_apps/path/installed_package/__init__.py | 1799 | import flask
# Minimal installed-package fixture: expose a module-level Flask application.
app = flask.Flask(__name__)
|
jdcc2/campussearch | refs/heads/master | venv/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py | 2754 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
# Analyser tuning constants (used by CharDistributionAnalysis below).
ENOUGH_DATA_THRESHOLD = 1024  # chars after which a conclusion may be drawn
SURE_YES = 0.99  # confidence ceiling -- never report 100% certainty
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3  # with this few frequent chars, answer SURE_NO
class CharDistributionAnalysis:
    """Accumulate two-byte character statistics for one CJK encoding.

    Subclasses install a per-encoding frequency table and implement
    ``get_order``; this base class turns the stream of observed characters
    into a confidence value.
    """

    def __init__(self):
        # Mapping from char order (see get_order()) to frequency order;
        # installed by subclasses.
        self._mCharToFreqOrder = None
        self._mTableSize = None  # size of the table above
        # Language-specific constant used when computing confidence; see
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        self._mTypicalDistributionRatio = None
        self.reset()

    def reset(self):
        """reset analyser, clear any state"""
        self._mDone = False  # set True once a conclusion has been reached
        self._mTotalChars = 0  # total characters encountered
        self._mFreqChars = 0  # characters whose frequency order is < 512

    def feed(self, aBuf, aCharLen):
        """feed a character with known length"""
        # Only 2-byte characters take part in the distribution analysis.
        order = self.get_order(aBuf) if aCharLen == 2 else -1
        if order < 0:
            return
        self._mTotalChars += 1
        if order < self._mTableSize and self._mCharToFreqOrder[order] < 512:
            self._mFreqChars += 1

    def get_confidence(self):
        """return confidence based on existing data"""
        # Too few characters in our consideration range: negative answer.
        if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
            return SURE_NO
        if self._mTotalChars != self._mFreqChars:
            rare_chars = self._mTotalChars - self._mFreqChars
            ratio = self._mFreqChars / (rare_chars * self._mTypicalDistributionRatio)
            if ratio < SURE_YES:
                return ratio
        # Normalise: never claim 100% certainty.
        return SURE_YES

    def got_enough_data(self):
        # A conclusion can be drawn well before all data has been seen.
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD

    def get_order(self, aBuf):
        # Subclasses map an encoded character to an encoding-independent
        # "order" number so several encodings of one language can share a
        # frequency table. The base implementation accepts nothing.
        return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """CharDistributionAnalysis specialised for EUC-TW."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-TW lead bytes of interest: 0xC4-0xFE; trail bytes: 0xA1-0xFE.
        # Sequence validity was already checked by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """CharDistributionAnalysis specialised for EUC-KR."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-KR lead bytes of interest: 0xB0-0xFE; trail bytes: 0xA1-0xFE.
        # Sequence validity was already checked by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """CharDistributionAnalysis specialised for GB2312."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # GB2312 lead bytes of interest: 0xB0-0xFE; trail bytes: 0xA1-0xFE.
        # Sequence validity was already checked by the state machine.
        lead = wrap_ord(aBuf[0])
        trail = wrap_ord(aBuf[1])
        if lead < 0xB0 or trail < 0xA1:
            return -1
        return 94 * (lead - 0xB0) + trail - 0xA1
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """CharDistributionAnalysis specialised for Big5."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Big5 lead bytes of interest: 0xA4-0xFE; trail bytes occupy two
        # ranges, 0x40-0x7E and 0xA1-0xFE. Sequence validity was already
        # checked by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xA4:
            return -1
        trail = wrap_ord(aBuf[1])
        if trail >= 0xA1:
            # Upper trail range sits after the 63 slots of the lower range.
            return 157 * (lead - 0xA4) + trail - 0xA1 + 63
        return 157 * (lead - 0xA4) + trail - 0x40
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """CharDistributionAnalysis specialised for Shift_JIS."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Shift_JIS lead bytes of interest: 0x81-0x9F and 0xE0-0xEF.
        # Sequence validity was already checked by the state machine.
        # NOTE(review): the original comment claimed trail bytes 0x81-0xFE
        # are also accepted, but the code rejects any trail byte > 0x7F;
        # that behaviour is preserved here.
        lead = wrap_ord(aBuf[0])
        trail = wrap_ord(aBuf[1])
        if 0x81 <= lead <= 0x9F:
            base = 188 * (lead - 0x81)
        elif 0xE0 <= lead <= 0xEF:
            base = 188 * (lead - 0xE0 + 31)
        else:
            return -1
        if trail > 0x7F:
            return -1
        return base + trail - 0x40
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """CharDistributionAnalysis specialised for EUC-JP."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-JP lead bytes of interest: 0xA0-0xFE; trail bytes: 0xA1-0xFE.
        # Sequence validity was already checked by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xA0:
            return -1
        # NOTE(review): a lead byte of exactly 0xA0 yields a negative order,
        # which feed() in the base class discards as invalid -- quirk
        # preserved from the original.
        return 94 * (lead - 0xA1) + wrap_ord(aBuf[1]) - 0xA1
|
utecuy/edx-platform | refs/heads/master | openedx/core/lib/logsettings.py | 127 | """Get log settings."""
import os
import platform
import sys
from logging.handlers import SysLogHandler
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']


def get_logger_config(log_dir,
                      logging_env="no_env",
                      tracking_filename="tracking.log",
                      edx_filename="edx.log",
                      dev_env=False,
                      syslog_addr=None,
                      debug=False,
                      local_loglevel='INFO',
                      console_loglevel=None,
                      service_variant=None):
    """
    Build and return a logging config dictionary suitable for assignment to
    the LOGGING setting.

    Returning a dict (instead of registering handlers directly) keeps the
    logging state untouched when settings modules call this repeatedly while
    extending each other.

    When ``dev_env`` is True, application and tracking logs are written as
    rotating files under ``log_dir`` using ``edx_filename`` and
    ``tracking_filename``; otherwise those two filenames are ignored and
    logging goes through the local rsyslogd socket. An additional remote
    syslog handler is attached when ``syslog_addr`` is given.
    """
    # Invalid level names silently fall back to sensible defaults.
    if local_loglevel not in LOG_LEVELS:
        local_loglevel = 'INFO'
    if console_loglevel not in LOG_LEVELS:  # also covers None
        console_loglevel = 'DEBUG' if debug else 'INFO'
    if service_variant is None:
        # A blank variant means logs are not routed to a sub directory.
        service_variant = ''

    hostname = platform.node().split(".")[0]
    syslog_format = ("[service_variant={service_variant}]"
                     "[%(name)s][env:{logging_env}] %(levelname)s "
                     "[{hostname} %(process)d] [%(filename)s:%(lineno)d] "
                     "- %(message)s").format(service_variant=service_variant,
                                             logging_env=logging_env,
                                             hostname=hostname)

    # Handlers attached to the root logger; the remote syslogger is only
    # added when an address was supplied.
    root_handlers = ['console', 'local']
    if syslog_addr:
        root_handlers.append('syslogger-remote')

    config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '%(asctime)s %(levelname)s %(process)d '
                          '[%(name)s] %(filename)s:%(lineno)d - %(message)s',
            },
            'syslog_format': {'format': syslog_format},
            'raw': {'format': '%(message)s'},
        },
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse',
            }
        },
        'handlers': {
            'console': {
                'level': console_loglevel,
                'class': 'logging.StreamHandler',
                'formatter': 'standard',
                'stream': sys.stderr,
            },
            'mail_admins': {
                'level': 'ERROR',
                'filters': ['require_debug_false'],
                'class': 'django.utils.log.AdminEmailHandler'
            },
            'newrelic': {
                'level': 'ERROR',
                'class': 'lms.lib.newrelic_logging.NewRelicHandler',
                'formatter': 'raw',
            }
        },
        'loggers': {
            'tracking': {
                'handlers': ['tracking'],
                'level': 'DEBUG',
                'propagate': False,
            },
            '': {
                'handlers': root_handlers,
                'level': 'DEBUG',
                'propagate': False
            },
            'django.request': {
                'handlers': ['mail_admins'],
                'level': 'ERROR',
                'propagate': True,
            },
        }
    }

    if syslog_addr:
        config['handlers']['syslogger-remote'] = {
            'level': 'INFO',
            'class': 'logging.handlers.SysLogHandler',
            'address': syslog_addr,
            'formatter': 'syslog_format',
        }

    if dev_env:
        # Development: drop rotating log files straight into log_dir.
        config['handlers']['local'] = {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': local_loglevel,
            'formatter': 'standard',
            'filename': os.path.join(log_dir, edx_filename),
            'maxBytes': 1024 * 1024 * 2,
            'backupCount': 5,
        }
        config['handlers']['tracking'] = {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(log_dir, tracking_filename),
            'formatter': 'raw',
            'maxBytes': 1024 * 1024 * 2,
            'backupCount': 5,
        }
    else:
        # Production: route everything through the local rsyslogd socket and
        # only record INFO and above at the root logger.
        config['loggers']['']['level'] = 'INFO'
        config['handlers']['local'] = {
            'level': local_loglevel,
            'class': 'logging.handlers.SysLogHandler',
            'address': '/dev/log',
            'formatter': 'syslog_format',
            'facility': SysLogHandler.LOG_LOCAL0,
        }
        config['handlers']['tracking'] = {
            'level': 'DEBUG',
            'class': 'logging.handlers.SysLogHandler',
            'address': '/dev/log',
            'facility': SysLogHandler.LOG_LOCAL1,
            'formatter': 'raw',
        }

    return config
|
lz1988/django-web | refs/heads/master | build/lib/django/contrib/staticfiles/handlers.py | 106 | try:
from urllib.parse import urlparse
from urllib.request import url2pathname
except ImportError: # Python 2
from urllib import url2pathname
from urlparse import urlparse
from django.conf import settings
from django.core.handlers.base import get_path_info
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
    """
    WSGI middleware that serves requests under the STATIC_URL prefix from
    the static files directory and delegates every other request to the
    wrapped application.
    """

    def __init__(self, application, base_dir=None):
        self.application = application
        self.base_dir = base_dir if base_dir else self.get_base_dir()
        # Parsed (scheme, netloc, path, ...) tuple of the static URL.
        self.base_url = urlparse(self.get_base_url())
        super(StaticFilesHandler, self).__init__()

    def get_base_dir(self):
        return settings.STATIC_ROOT

    def get_base_url(self):
        utils.check_settings()
        return settings.STATIC_URL

    def _should_handle(self, path):
        """
        Return True when this handler should serve ``path``: it must start
        with the static URL's path component, and the static URL must not
        name another host (non-empty netloc).
        """
        prefix = self.base_url[2]
        return path.startswith(prefix) and not self.base_url[1]

    def file_path(self, url):
        """
        Return the relative on-disk path of the media file for ``url``.
        """
        prefix_length = len(self.base_url[2])
        return url2pathname(url[prefix_length:])

    def serve(self, request):
        """
        Serve the request path through the staticfiles serve() view.
        """
        return serve(request, self.file_path(request.path), insecure=True)

    def get_response(self, request):
        from django.http import Http404

        if not self._should_handle(request.path):
            return super(StaticFilesHandler, self).get_response(request)
        try:
            return self.serve(request)
        except Http404 as e:
            if settings.DEBUG:
                from django.views import debug
                return debug.technical_404_response(request, e)
        # A 404 outside DEBUG falls back to the regular handler.
        return super(StaticFilesHandler, self).get_response(request)

    def __call__(self, environ, start_response):
        if self._should_handle(get_path_info(environ)):
            return super(StaticFilesHandler, self).__call__(environ, start_response)
        return self.application(environ, start_response)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.