repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
tachang/django-storages
|
refs/heads/master
|
examples/cloudfiles_project/photos/__init__.py
|
12133432
| |
yasserglez/tagfs
|
refs/heads/master
|
packages/tagfs/contrib/django/utils/version.py
|
320
|
import django
import os.path
import re
def get_svn_revision(path=None):
"""
Returns the SVN revision in the form SVN-XXXX,
where XXXX is the revision number.
Returns SVN-unknown if anything goes wrong, such as an unexpected
format of internal SVN files.
If path is provided, it should be a directory whose SVN info you want to
inspect. If it's not provided, this will use the root django/ package
directory.
"""
rev = None
if path is None:
path = django.__path__[0]
entries_path = '%s/.svn/entries' % path
try:
entries = open(entries_path, 'r').read()
except IOError:
pass
else:
# Versions >= 7 of the entries file are flat text. The first line is
# the version number. The next set of digits after 'dir' is the revision.
if re.match('(\d+)', entries):
rev_match = re.search('\d+\s+dir\s+(\d+)', entries)
if rev_match:
rev = rev_match.groups()[0]
# Older XML versions of the file specify revision as an attribute of
# the first entries node.
else:
from xml.dom import minidom
dom = minidom.parse(entries_path)
rev = dom.getElementsByTagName('entry')[0].getAttribute('revision')
if rev:
return u'SVN-%s' % rev
return u'SVN-unknown'
|
v-iam/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/patch_route_filter.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class PatchRouteFilter(SubResource):
"""Route Filter Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param rules: Collection of RouteFilterRules contained within a route
filter.
:type rules: list of :class:`RouteFilterRule
<azure.mgmt.network.v2017_06_01.models.RouteFilterRule>`
:ivar peerings: A collection of references to express route circuit
peerings.
:vartype peerings: list of :class:`ExpressRouteCircuitPeering
<azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitPeering>`
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict
"""
_validation = {
'peerings': {'readonly': True},
'provisioning_state': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, id=None, rules=None, tags=None):
super(PatchRouteFilter, self).__init__(id=id)
self.rules = rules
self.peerings = None
self.provisioning_state = None
self.name = None
self.etag = None
self.type = None
self.tags = tags
|
raychorn/knowu
|
refs/heads/master
|
django/djangononrelsample2/django/template/response.py
|
221
|
from django.http import HttpResponse
from django.template import loader, Context, RequestContext
from django.utils import six
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
mimetype=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use tricky-to-debug problems
self.template_name = template
self.context_data = context
self._post_render_callbacks = []
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super(SimpleTemplateResponse, self).__init__('', content_type, status,
mimetype)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""Pickling support function.
Ensures that the object can't be pickled before it has been
rendered, and that the pickled state only includes rendered
data, not the data used to construct the response.
"""
obj_dict = super(SimpleTemplateResponse, self).__getstate__()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"Accepts a template object, path-to-template or list of paths"
if isinstance(template, (list, tuple)):
return loader.select_template(template)
elif isinstance(template, six.string_types):
return loader.get_template(template)
else:
return template
def resolve_context(self, context):
"""Converts context data into a full Context object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
else:
return Context(context)
@property
def rendered_content(self):
"""Returns the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
content = template.render(context)
return content
def add_post_render_callback(self, callback):
"""Adds a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Renders (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Returns the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be iterated over.')
return super(SimpleTemplateResponse, self).__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be accessed.')
return super(SimpleTemplateResponse, self).content
@content.setter
def content(self, value):
"""Sets the content for the response
"""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + \
['_request', '_current_app']
def __init__(self, request, template, context=None, content_type=None,
status=None, mimetype=None, current_app=None):
# self.request gets over-written by django.test.client.Client - and
# unlike context_data and template_name the _request should not
# be considered part of the public API.
self._request = request
# As a convenience we'll allow callers to provide current_app without
# having to avoid needing to create the RequestContext directly
self._current_app = current_app
super(TemplateResponse, self).__init__(
template, context, content_type, status, mimetype)
def resolve_context(self, context):
"""Convert context data into a full RequestContext object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
return RequestContext(self._request, context, current_app=self._current_app)
|
pfmoore/pip
|
refs/heads/main
|
tests/lib/filesystem.py
|
4
|
"""Helpers for filesystem-dependent tests.
"""
import os
import socket
import subprocess
import sys
from functools import partial
from itertools import chain
from .path import Path
def make_socket_file(path):
# Socket paths are limited to 108 characters (sometimes less) so we
# chdir before creating it and use a relative path name.
cwd = os.getcwd()
os.chdir(os.path.dirname(path))
try:
sock = socket.socket(socket.AF_UNIX)
sock.bind(os.path.basename(path))
finally:
os.chdir(cwd)
def make_unreadable_file(path):
Path(path).touch()
os.chmod(path, 0o000)
if sys.platform == "win32":
# Once we drop PY2 we can use `os.getlogin()` instead.
username = os.environ["USERNAME"]
# Remove "Read Data/List Directory" permission for current user, but
# leave everything else.
args = ["icacls", path, "/deny", username + ":(RD)"]
subprocess.check_call(args)
def get_filelist(base):
def join(dirpath, dirnames, filenames):
relative_dirpath = os.path.relpath(dirpath, base)
join_dirpath = partial(os.path.join, relative_dirpath)
return chain(
(join_dirpath(p) for p in dirnames),
(join_dirpath(p) for p in filenames),
)
return set(chain.from_iterable(join(*dirinfo) for dirinfo in os.walk(base)))
|
blindroot/django
|
refs/heads/master
|
tests/gis_tests/geoadmin/urls.py
|
573
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
]
|
SnabbCo/neutron
|
refs/heads/master
|
neutron/plugins/embrane/plugins/embrane_fake_plugin.py
|
18
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc.
from neutron.db import extraroute_db
from neutron.plugins.embrane import base_plugin as base
from neutron.plugins.embrane.l2base.fake import fake_l2_plugin as l2
from neutron.plugins.embrane.l2base.fake import fakeplugin_support as sup
class EmbraneFakePlugin(base.EmbranePlugin, extraroute_db.ExtraRoute_db_mixin,
l2.FakeL2Plugin):
_plugin_support = sup.FakePluginSupport()
def __init__(self):
'''First run plugin specific initialization, then Embrane's.'''
self.supported_extension_aliases += ["extraroute", "router"]
l2.FakeL2Plugin.__init__(self)
self._run_embrane_config()
|
was4444/chromium.src
|
refs/heads/nw15
|
third_party/markdown/blockparser.py
|
109
|
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
class State(list):
""" Track the current and nested state of the parser.
This utility class is used to track the state of the BlockParser and
support multiple levels if nesting. It's just a simple API wrapped around
a list. Each time a state is set, that state is appended to the end of the
list. Each time a state is reset, that state is removed from the end of
the list.
Therefore, each time a state is set for a nested block, that state must be
reset when we back out of that level of nesting or the state could be
corrupted.
While all the methods of a list object are available, only the three
defined below need be used.
"""
def set(self, state):
""" Set a new state. """
self.append(state)
def reset(self):
""" Step back one step in nested state. """
self.pop()
def isstate(self, state):
""" Test that top (current) level is of given state. """
if len(self):
return self[-1] == state
else:
return False
class BlockParser:
""" Parse Markdown blocks into an ElementTree object.
A wrapper class that stitches the various BlockProcessors together,
looping through them and creating an ElementTree object.
"""
def __init__(self, markdown):
self.blockprocessors = odict.OrderedDict()
self.state = State()
self.markdown = markdown
def parseDocument(self, lines):
""" Parse a markdown document into an ElementTree.
Given a list of lines, an ElementTree object (not just a parent Element)
is created and the root element is passed to the parser as the parent.
The ElementTree object is returned.
This should only be called on an entire document, not pieces.
"""
# Create a ElementTree from the lines
self.root = util.etree.Element(self.markdown.doc_tag)
self.parseChunk(self.root, '\n'.join(lines))
return util.etree.ElementTree(self.root)
def parseChunk(self, parent, text):
""" Parse a chunk of markdown text and attach to given etree node.
While the ``text`` argument is generally assumed to contain multiple
blocks which will be split on blank lines, it could contain only one
block. Generally, this method would be called by extensions when
block parsing is required.
The ``parent`` etree Element passed in is altered in place.
Nothing is returned.
"""
self.parseBlocks(parent, text.split('\n\n'))
def parseBlocks(self, parent, blocks):
""" Process blocks of markdown text and attach to given etree node.
Given a list of ``blocks``, each blockprocessor is stepped through
until there are no blocks left. While an extension could potentially
call this method directly, it's generally expected to be used internally.
This is a public method as an extension may need to add/alter additional
BlockProcessors which call this method to recursively parse a nested
block.
"""
while blocks:
for processor in self.blockprocessors.values():
if processor.test(parent, blocks[0]):
if processor.run(parent, blocks) is not False:
# run returns True or None
break
|
Lekanich/intellij-community
|
refs/heads/master
|
python/testData/editing/misplacedElse.py
|
83
|
def foo():
pass
else<caret>
|
rhattersley/iris
|
refs/heads/master
|
lib/iris/palette.py
|
13
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Load, configure and register color map palettes and initialise
color map meta-data mappings.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from functools import wraps
import os
import os.path
import re
import cf_units
import matplotlib.cm as mpl_cm
import matplotlib.colors as mpl_colors
import numpy as np
import iris.cube
import iris.config
# Symmetric normalization function pivot points by SI unit.
PIVOT_BY_UNIT = {cf_units.Unit('K'): 273.15}
# Color map names by palette file metadata field value.
CMAP_BREWER = set()
_CMAP_BY_SCHEME = None
_CMAP_BY_KEYWORD = None
_CMAP_BY_STD_NAME = None
_MISSING_KWARG_CMAP = 'missing kwarg cmap'
_MISSING_KWARG_NORM = 'missing kwarg norm'
def is_brewer(cmap):
"""
Determine whether the color map is a Cynthia Brewer color map.
Args:
* cmap:
The color map instance.
Returns:
Boolean.
"""
result = False
if cmap is not None:
result = cmap.name in CMAP_BREWER
return result
def _default_cmap_norm(args, kwargs):
"""
This function injects default cmap and norm behavour into the keyword
arguments, based on the cube referenced within the positional arguments.
"""
cube = None
# Find the single cube reference within the positional arguments.
for arg in args:
if isinstance(arg, iris.cube.Cube):
cube = arg
break
# Find the keyword arguments of interest.
colors = kwargs.get('colors', None)
# cmap = None to disable default behaviour.
cmap = kwargs.get('cmap', _MISSING_KWARG_CMAP)
# norm = None to disable default behaviour.
norm = kwargs.get('norm', _MISSING_KWARG_NORM)
# Note that "colors" and "cmap" keywords are mutually exclusive.
if colors is None and cube is not None:
std_name = cube.standard_name.lower() if cube.standard_name else ""
# Perform default "cmap" keyword behaviour.
if cmap == _MISSING_KWARG_CMAP:
# Check for an exact match against standard name.
cmaps = _CMAP_BY_STD_NAME.get(std_name, set())
if len(cmaps) == 0:
# Check for a fuzzy match against a keyword.
for keyword in six.iterkeys(_CMAP_BY_KEYWORD):
if keyword in std_name:
cmaps.update(_CMAP_BY_KEYWORD[keyword])
# Add default color map to keyword arguments.
if len(cmaps):
cmap = sorted(cmaps, reverse=True)[0]
kwargs['cmap'] = mpl_cm.get_cmap(cmap)
# Perform default "norm" keyword behaviour.
if norm == _MISSING_KWARG_NORM:
if 'anomaly' in std_name:
# Determine the pivot point.
pivot = PIVOT_BY_UNIT.get(cube.units, 0)
norm = SymmetricNormalize(pivot)
kwargs['norm'] = norm
return args, kwargs
def cmap_norm(cube):
"""
Determine the default :class:`matplotlib.colors.LinearSegmentedColormap`
and :class:`iris.palette.SymmetricNormalize` instances associated with
the cube.
Args:
* cube (:class:`iris.cube.Cube`):
Source cube to generate default palette from.
Returns:
Tuple of :class:`matplotlib.colors.LinearSegmentedColormap` and
:class:`iris.palette.SymmetricNormalize`
"""
args, kwargs = _default_cmap_norm((cube,), {})
return kwargs.get('cmap'), kwargs.get('norm')
def auto_palette(func):
"""
Decorator wrapper function to control the default behaviour of the
matplotlib cmap and norm keyword arguments.
Args:
* func (callable):
Callable function to be wrapped by the decorator.
Returns:
Closure wrapper function.
"""
@wraps(func)
def wrapper_func(*args, **kwargs):
"""
Closure wrapper function to provide default keyword argument
behaviour.
"""
# Update the keyword arguments with defaults.
args, kwargs = _default_cmap_norm(args, kwargs)
# Call the wrapped function and return its result.
return func(*args, **kwargs)
# Return the closure wrapper function.
return wrapper_func
class SymmetricNormalize(mpl_colors.Normalize, object):
"""
Provides a symmetric normalization class around a given pivot point.
"""
def __init__(self, pivot, *args, **kwargs):
self.pivot = pivot
self._vmin = None
self._vmax = None
mpl_colors.Normalize.__init__(self, *args, **kwargs)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.pivot)
def _update(self, val, update_min=True, update_max=True):
# Update both _vmin and _vmax from given value.
val_diff = np.abs(val - self.pivot)
vmin_diff = np.abs(self._vmin - self.pivot) if self._vmin else 0.0
vmax_diff = np.abs(self._vmax - self.pivot) if self._vmax else 0.0
diff = max(val_diff, vmin_diff, vmax_diff)
if update_min:
self._vmin = self.pivot - diff
if update_max:
self._vmax = self.pivot + diff
def _get_vmin(self):
return getattr(self, '_vmin')
def _set_vmin(self, val):
if val is None:
self._vmin = None
elif self._vmax is None:
# Don't set _vmax, it'll stop matplotlib from giving us one.
self._update(val, update_max=False)
else:
# Set both _vmin and _vmax from value
self._update(val)
vmin = property(_get_vmin, _set_vmin)
def _get_vmax(self):
return getattr(self, '_vmax')
def _set_vmax(self, val):
if val is None:
self._vmax = None
elif self._vmin is None:
# Don't set _vmin, it'll stop matplotlib from giving us one.
self._update(val, update_min=False)
else:
# Set both _vmin and _vmax from value
self._update(val)
vmax = property(_get_vmax, _set_vmax)
def _load_palette():
"""
Load, configure and register color map palettes and initialise
color map metadata mappings.
"""
# Reference these module level namespace variables.
global CMAP_BREWER, _CMAP_BY_SCHEME, _CMAP_BY_KEYWORD, _CMAP_BY_STD_NAME
_CMAP_BY_SCHEME = {}
_CMAP_BY_KEYWORD = {}
_CMAP_BY_STD_NAME = {}
filenames = []
# Identify all .txt color map palette files.
for root, dirs, files in os.walk(iris.config.PALETTE_PATH):
# Prune any .svn directory from the tree walk.
if '.svn' in dirs:
del dirs[dirs.index('.svn')]
# Identify any target .txt color map palette files.
filenames.extend([os.path.join(root, filename)
for filename in files
if os.path.splitext(filename)[1] == '.txt'])
for filename in filenames:
# Default color map name based on the file base-name (case-SENSITIVE).
cmap_name = os.path.splitext(os.path.basename(filename))[0]
cmap_scheme = None
cmap_keywords = []
cmap_std_names = []
cmap_type = None
# Perform default color map interpolation for quantization
# levels per primary color.
interpolate_flag = True
# Read the file header.
with open(filename) as file_handle:
header = filter(lambda line: re.match('^\s*#.*:\s+.*$', line),
file_handle.readlines())
# Extract the file header metadata.
for line in header:
line = line.replace('#', '', 1).split(':')
head = line[0].strip().lower()
body = line[1].strip()
if head == 'name':
# Case-SENSITIVE.
cmap_name = 'brewer_{}'.format(body)
if head == 'scheme':
# Case-insensitive.
cmap_scheme = body.lower()
if head == 'keyword':
# Case-insensitive.
keywords = [part.strip().lower() for part in body.split(',')]
cmap_keywords.extend(keywords)
if head == 'std_name':
# Case-insensitive.
std_names = [part.strip().lower() for part in body.split(',')]
cmap_std_names.extend(std_names)
if head == 'interpolate':
# Case-insensitive.
interpolate_flag = body.lower() != 'off'
if head == 'type':
# Case-insensitive.
cmap_type = body.lower()
# Integrity check for meta-data 'type' field.
assert cmap_type is not None, \
'Missing meta-data "type" keyword for color map file, "%s"' % \
filename
assert cmap_type == 'rgb', \
'Invalid type [%s] for color map file "%s"' % (cmap_type, filename)
# Update the color map look-up dictionaries.
CMAP_BREWER.add(cmap_name)
if cmap_scheme is not None:
scheme_group = _CMAP_BY_SCHEME.setdefault(cmap_scheme, set())
scheme_group.add(cmap_name)
for keyword in cmap_keywords:
keyword_group = _CMAP_BY_KEYWORD.setdefault(keyword, set())
keyword_group.add(cmap_name)
for std_name in cmap_std_names:
std_name_group = _CMAP_BY_STD_NAME.setdefault(std_name, set())
std_name_group.add(cmap_name)
# Load palette data and create the associated color map.
cmap_data = np.loadtxt(filename)
# Ensure to restrict the number of RGB quantization levels to
# prevent color map interpolation.
if interpolate_flag:
# Perform default color map interpolation for quantization
# levels per primary color.
cmap = mpl_colors.LinearSegmentedColormap.from_list(
cmap_name, cmap_data)
else:
# Restrict quantization levels per primary color (turn-off
# interpolation).
# Typically used for Brewer color maps.
cmap = mpl_colors.LinearSegmentedColormap.from_list(
cmap_name, cmap_data, N=len(cmap_data))
# Register the color map for use.
mpl_cm.register_cmap(cmap=cmap)
# Ensure to load the color map palettes.
_load_palette()
|
ales-erjavec/scipy
|
refs/heads/master
|
scipy/optimize/_linprog.py
|
29
|
"""
A top-level linear programming interface. Currently this interface only
solves linear programming problems via the Simplex Method.
.. versionadded:: 0.15.0
Functions
---------
.. autosummary::
:toctree: generated/
linprog
linprog_verbose_callback
linprog_terse_callback
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .optimize import OptimizeResult, _check_unknown_options
__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
__docformat__ = "restructuredtext en"
def linprog_verbose_callback(xk, **kwargs):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces detailed output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
xk : array_like
The current solution vector.
**kwargs : dict
A dictionary containing the following parameters:
tableau : array_like
The current tableau of the simplex algorithm.
Its structure is defined in _solve_simplex.
phase : int
The current Phase of the simplex algorithm (1 or 2)
nit : int
The current iteration number.
pivot : tuple(int, int)
The index of the tableau selected as the next pivot,
or nan if no pivot exists
basis : array(int)
A list of the current basic variables.
Each element contains the name of a basic variable and its value.
complete : bool
True if the simplex algorithm has completed
(and this is the final call to callback), otherwise False.
"""
tableau = kwargs["tableau"]
nit = kwargs["nit"]
pivrow, pivcol = kwargs["pivot"]
phase = kwargs["phase"]
basis = kwargs["basis"]
complete = kwargs["complete"]
saved_printoptions = np.get_printoptions()
np.set_printoptions(linewidth=500,
formatter={'float':lambda x: "{: 12.4f}".format(x)})
if complete:
print("--------- Iteration Complete - Phase {:d} -------\n".format(phase))
print("Tableau:")
elif nit == 0:
print("--------- Initial Tableau - Phase {:d} ----------\n".format(phase))
else:
print("--------- Iteration {:d} - Phase {:d} --------\n".format(nit, phase))
print("Tableau:")
if nit >= 0:
print("" + str(tableau) + "\n")
if not complete:
print("Pivot Element: T[{:.0f}, {:.0f}]\n".format(pivrow, pivcol))
print("Basic Variables:", basis)
print()
print("Current Solution:")
print("x = ", xk)
print()
print("Current Objective Value:")
print("f = ", -tableau[-1, -1])
print()
np.set_printoptions(**saved_printoptions)
def linprog_terse_callback(xk, **kwargs):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces brief output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
xk : array_like
The current solution vector.
**kwargs : dict
A dictionary containing the following parameters:
tableau : array_like
The current tableau of the simplex algorithm.
Its structure is defined in _solve_simplex.
vars : tuple(str, ...)
Column headers for each column in tableau.
"x[i]" for actual variables, "s[i]" for slack surplus variables,
"a[i]" for artificial variables, and "RHS" for the constraint
RHS vector.
phase : int
The current Phase of the simplex algorithm (1 or 2)
nit : int
The current iteration number.
pivot : tuple(int, int)
The index of the tableau selected as the next pivot,
or nan if no pivot exists
basics : list[tuple(int, float)]
A list of the current basic variables.
Each element contains the index of a basic variable and
its value.
complete : bool
True if the simplex algorithm has completed
(and this is the final call to callback), otherwise False.
"""
nit = kwargs["nit"]
if nit == 0:
print("Iter: X:")
print("{: <5d} ".format(nit), end="")
print(xk)
def _pivot_col(T, tol=1.0E-12, bland=False):
"""
Given a linear programming simplex tableau, determine the column
of the variable to enter the basis.
Parameters
----------
T : 2D ndarray
The simplex tableau.
tol : float
Elements in the objective row larger than -tol will not be considered
for pivoting. Nominally this value is zero, but numerical issues
cause a tolerance about zero to be necessary.
bland : bool
If True, use Bland's rule for selection of the column (select the
first column with a negative coefficient in the objective row,
regardless of magnitude).
Returns
-------
status: bool
True if a suitable pivot column was found, otherwise False.
A return of False indicates that the linear programming simplex
algorithm is complete.
col: int
The index of the column of the pivot element.
If status is False, col will be returned as nan.
"""
ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
if ma.count() == 0:
return False, np.nan
if bland:
return True, np.where(ma.mask == False)[0][0]
return True, np.ma.where(ma == ma.min())[0][0]
def _pivot_row(T, pivcol, phase, tol=1.0E-12):
"""
Given a linear programming simplex tableau, determine the row for the
pivot operation.
Parameters
----------
T : 2D ndarray
The simplex tableau.
pivcol : int
The index of the pivot column.
phase : int
The phase of the simplex algorithm (1 or 2).
tol : float
Elements in the pivot column smaller than tol will not be considered
for pivoting. Nominally this value is zero, but numerical issues
cause a tolerance about zero to be necessary.
Returns
-------
status: bool
True if a suitable pivot row was found, otherwise False. A return
of False indicates that the linear programming problem is unbounded.
row: int
The index of the row of the pivot element. If status is False, row
will be returned as nan.
"""
if phase == 1:
k = 2
else:
k = 1
ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
if ma.count() == 0:
return False, np.nan
mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
q = mb / ma
return True, np.ma.where(q == q.min())[0][0]
def _solve_simplex(T, n, basis, maxiter=1000, phase=2, callback=None,
tol=1.0E-12, nit0=0, bland=False):
"""
Solve a linear programming problem in "standard maximization form" using
the Simplex Method.
Minimize :math:`f = c^T x`
subject to
.. math::
Ax = b
x_i >= 0
b_j >= 0
Parameters
----------
T : array_like
A 2-D array representing the simplex T corresponding to the
maximization problem. It should have the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0]]
for a Phase 2 problem, or the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0],
[c'[0], c'[1], ..., c'[n_total], 0]]
for a Phase 1 problem (a Problem in which a basic feasible solution is
sought prior to maximizing the actual objective. T is modified in
place by _solve_simplex.
n : int
The number of true variables in the problem.
basis : array
An array of the indices of the basic variables, such that basis[i]
contains the column corresponding to the basic variable for row i.
Basis is modified in place by _solve_simplex
maxiter : int
The maximum number of iterations to perform before aborting the
optimization.
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row representing
an alternate objective function.
callback : callable, optional
If a callback function is provided, it will be called within each
iteration of the simplex algorithm. The callback must have the
signature `callback(xk, **kwargs)` where xk is the current solution
vector and kwargs is a dictionary containing the following::
"T" : The current Simplex algorithm T
"nit" : The current iteration.
"pivot" : The pivot (row, column) used for the next iteration.
"phase" : Whether the algorithm is in Phase 1 or Phase 2.
"basis" : The indices of the columns of the basic variables.
tol : float
The tolerance which determines when a solution is "close enough" to
zero in Phase 1 to be considered a basic feasible solution or close
enough to positive to to serve as an optimal solution.
nit0 : int
The initial iteration number used to keep an accurate iteration total
in a two-phase problem.
bland : bool
If True, choose pivots using Bland's rule [3]. In problems which
fail to converge due to cycling, using Bland's rule can provide
convergence at the expense of a less optimal path about the simplex.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. Possible
values for the ``status`` attribute are:
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
See `OptimizeResult` for a description of other attributes.
"""
nit = nit0
complete = False
if phase == 1:
m = T.shape[0]-2
elif phase == 2:
m = T.shape[0]-1
else:
raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")
if phase == 2:
# Check if any artificial variables are still in the basis.
# If yes, check if any coefficients from this row and a column
# corresponding to one of the non-artificial variable is non-zero.
# If found, pivot at this term. If not, start phase 2.
# Do this for all artificial variables in the basis.
# Ref: "An Introduction to Linear Programming and Game Theory"
# by Paul R. Thie, Gerard E. Keough, 3rd Ed,
# Chapter 3.7 Redundant Systems (pag 102)
for pivrow in [row for row in range(basis.size)
if basis[row] > T.shape[1] - 2]:
non_zero_row = [col for col in range(T.shape[1] - 1)
if T[pivrow, col] != 0]
if len(non_zero_row) > 0:
pivcol = non_zero_row[0]
# variable represented by pivcol enters
# variable in basis[pivrow] leaves
basis[pivrow] = pivcol
pivval = T[pivrow][pivcol]
T[pivrow, :] = T[pivrow, :] / pivval
for irow in range(T.shape[0]):
if irow != pivrow:
T[irow, :] = T[irow, :] - T[pivrow, :]*T[irow, pivcol]
nit += 1
if len(basis[:m]) == 0:
solution = np.zeros(T.shape[1] - 1, dtype=np.float64)
else:
solution = np.zeros(max(T.shape[1] - 1, max(basis[:m]) + 1),
dtype=np.float64)
while not complete:
# Find the pivot column
pivcol_found, pivcol = _pivot_col(T, tol, bland)
if not pivcol_found:
pivcol = np.nan
pivrow = np.nan
status = 0
complete = True
else:
# Find the pivot row
pivrow_found, pivrow = _pivot_row(T, pivcol, phase, tol)
if not pivrow_found:
status = 3
complete = True
if callback is not None:
solution[:] = 0
solution[basis[:m]] = T[:m, -1]
callback(solution[:n], **{"tableau": T,
"phase":phase,
"nit":nit,
"pivot":(pivrow, pivcol),
"basis":basis,
"complete": complete and phase == 2})
if not complete:
if nit >= maxiter:
# Iteration limit exceeded
status = 1
complete = True
else:
# variable represented by pivcol enters
# variable in basis[pivrow] leaves
basis[pivrow] = pivcol
pivval = T[pivrow][pivcol]
T[pivrow, :] = T[pivrow, :] / pivval
for irow in range(T.shape[0]):
if irow != pivrow:
T[irow, :] = T[irow, :] - T[pivrow, :]*T[irow, pivcol]
nit += 1
return nit, status
def _linprog_simplex(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, maxiter=1000, disp=False, callback=None,
tol=1.0E-12, bland=False, **unknown_options):
"""
Solve the following linear programming problem via a two-phase
simplex algorithm.
maximize: c^T * x
subject to: A_ub * x <= b_ub
A_eq * x == b_eq
Parameters
----------
c : array_like
Coefficients of the linear objective function to be maximized.
A_ub : array_like
2-D array which, when matrix-multiplied by x, gives the values of the
upper-bound inequality constraints at x.
b_ub : array_like
1-D array of values representing the upper-bound of each inequality
constraint (row) in A_ub.
A_eq : array_like
2-D array which, when matrix-multiplied by x, gives the values of the
equality constraints at x.
b_eq : array_like
1-D array of values representing the RHS of each equality constraint
(row) in A_eq.
bounds : array_like
The bounds for each independent variable in the solution, which can take
one of three forms::
None : The default bounds, all variables are non-negative.
(lb, ub) : If a 2-element sequence is provided, the same
lower bound (lb) and upper bound (ub) will be applied
to all variables.
[(lb_0, ub_0), (lb_1, ub_1), ...] : If an n x 2 sequence is provided,
each variable x_i will be bounded by lb[i] and ub[i].
Infinite bounds are specified using -np.inf (negative)
or np.inf (positive).
callback : callable
If a callback function is provide, it will be called within each
iteration of the simplex algorithm. The callback must have the
signature `callback(xk, **kwargs)` where xk is the current solution
vector and kwargs is a dictionary containing the following::
"tableau" : The current Simplex algorithm tableau
"nit" : The current iteration.
"pivot" : The pivot (row, column) used for the next iteration.
"phase" : Whether the algorithm is in Phase 1 or Phase 2.
"bv" : A structured array containing a string representation of each
basic variable and its current value.
Options
-------
maxiter : int
The maximum number of iterations to perform.
disp : bool
If True, print exit status message to sys.stdout
tol : float
The tolerance which determines when a solution is "close enough" to zero
in Phase 1 to be considered a basic feasible solution or close enough
to positive to to serve as an optimal solution.
bland : bool
If True, use Bland's anti-cycling rule [3] to choose pivots to
prevent cycling. If False, choose pivots which should lead to a
converged solution more quickly. The latter method is subject to
cycling (non-convergence) in rare instances.
Returns
-------
A scipy.optimize.OptimizeResult consisting of the following fields::
x : ndarray
The independent variable vector which optimizes the linear
programming problem.
slack : ndarray
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
success : bool
Returns True if the algorithm succeeded in finding an optimal
solution.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
Examples
--------
Consider the following problem:
Minimize: f = -1*x[0] + 4*x[1]
Subject to: -3*x[0] + 1*x[1] <= 6
1*x[0] + 2*x[1] <= 4
x[1] >= -3
where: -inf <= x[0] <= inf
This problem deviates from the standard linear programming problem. In
standard form, linear programming problems assume the variables x are
non-negative. Since the variables don't have standard bounds where
0 <= x <= inf, the bounds of the variables must be explicitly set.
There are two upper-bound constraints, which can be expressed as
dot(A_ub, x) <= b_ub
The input for this problem is as follows:
>>> from scipy.optimize import linprog
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bnds = (None, None)
>>> x1_bnds = (-3, None)
>>> res = linprog(c, A, b, bounds=(x0_bnds, x1_bnds))
>>> print(res)
status: 0
x: array([ 10., -3.])
slack: array([ 39., 0.])
nit: 1
message: 'Optimization terminated successfully.'
fun: -22.0
success: True
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
"""
_check_unknown_options(unknown_options)
status = 0
messages = {0: "Optimization terminated successfully.",
1: "Iteration limit reached.",
2: "Optimization failed. Unable to find a feasible"
" starting point.",
3: "Optimization failed. The problem appears to be unbounded.",
4: "Optimization failed. Singular matrix encountered."}
have_floor_variable = False
cc = np.asarray(c)
# The initial value of the objective function element in the tableau
f0 = 0
# The number of variables as given by c
n = len(c)
# Convert the input arguments to arrays (sized to zero if not provided)
Aeq = np.asarray(A_eq) if A_eq is not None else np.empty([0, len(cc)])
Aub = np.asarray(A_ub) if A_ub is not None else np.empty([0, len(cc)])
beq = np.ravel(np.asarray(b_eq)) if b_eq is not None else np.empty([0])
bub = np.ravel(np.asarray(b_ub)) if b_ub is not None else np.empty([0])
# Analyze the bounds and determine what modifications to be made to
# the constraints in order to accommodate them.
L = np.zeros(n, dtype=np.float64)
U = np.ones(n, dtype=np.float64)*np.inf
if bounds is None or len(bounds) == 0:
pass
elif len(bounds) == 2 and not hasattr(bounds[0], '__len__'):
# All bounds are the same
a = bounds[0] if bounds[0] is not None else -np.inf
b = bounds[1] if bounds[1] is not None else np.inf
L = np.asarray(n*[a], dtype=np.float64)
U = np.asarray(n*[b], dtype=np.float64)
else:
if len(bounds) != n:
status = -1
message = ("Invalid input for linprog with method = 'simplex'. "
"Length of bounds is inconsistent with the length of c")
else:
try:
for i in range(n):
if len(bounds[i]) != 2:
raise IndexError()
L[i] = bounds[i][0] if bounds[i][0] is not None else -np.inf
U[i] = bounds[i][1] if bounds[i][1] is not None else np.inf
except IndexError:
status = -1
message = ("Invalid input for linprog with "
"method = 'simplex'. bounds must be a n x 2 "
"sequence/array where n = len(c).")
if np.any(L == -np.inf):
# If any lower-bound constraint is a free variable
# add the first column variable as the "floor" variable which
# accommodates the most negative variable in the problem.
n = n + 1
L = np.concatenate([np.array([0]), L])
U = np.concatenate([np.array([np.inf]), U])
cc = np.concatenate([np.array([0]), cc])
Aeq = np.hstack([np.zeros([Aeq.shape[0], 1]), Aeq])
Aub = np.hstack([np.zeros([Aub.shape[0], 1]), Aub])
have_floor_variable = True
# Now before we deal with any variables with lower bounds < 0,
# deal with finite bounds which can be simply added as new constraints.
# Also validate bounds inputs here.
for i in range(n):
if(L[i] > U[i]):
status = -1
message = ("Invalid input for linprog with method = 'simplex'. "
"Lower bound %d is greater than upper bound %d" % (i, i))
if np.isinf(L[i]) and L[i] > 0:
status = -1
message = ("Invalid input for linprog with method = 'simplex'. "
"Lower bound may not be +infinity")
if np.isinf(U[i]) and U[i] < 0:
status = -1
message = ("Invalid input for linprog with method = 'simplex'. "
"Upper bound may not be -infinity")
if np.isfinite(L[i]) and L[i] > 0:
# Add a new lower-bound (negative upper-bound) constraint
Aub = np.vstack([Aub, np.zeros(n)])
Aub[-1, i] = -1
bub = np.concatenate([bub, np.array([-L[i]])])
L[i] = 0
if np.isfinite(U[i]):
# Add a new upper-bound constraint
Aub = np.vstack([Aub, np.zeros(n)])
Aub[-1, i] = 1
bub = np.concatenate([bub, np.array([U[i]])])
U[i] = np.inf
# Now find negative lower bounds (finite or infinite) which require a
# change of variables or free variables and handle them appropriately
for i in range(0, n):
if L[i] < 0:
if np.isfinite(L[i]) and L[i] < 0:
# Add a change of variables for x[i]
# For each row in the constraint matrices, we take the
# coefficient from column i in A,
# and subtract the product of that and L[i] to the RHS b
beq = beq - Aeq[:, i] * L[i]
bub = bub - Aub[:, i] * L[i]
# We now have a nonzero initial value for the objective
# function as well.
f0 = f0 - cc[i] * L[i]
else:
# This is an unrestricted variable, let x[i] = u[i] - v[0]
# where v is the first column in all matrices.
Aeq[:, 0] = Aeq[:, 0] - Aeq[:, i]
Aub[:, 0] = Aub[:, 0] - Aub[:, i]
cc[0] = cc[0] - cc[i]
if np.isinf(U[i]):
if U[i] < 0:
status = -1
message = ("Invalid input for linprog with "
"method = 'simplex'. Upper bound may not be -inf.")
# The number of upper bound constraints (rows in A_ub and elements in b_ub)
mub = len(bub)
# The number of equality constraints (rows in A_eq and elements in b_eq)
meq = len(beq)
# The total number of constraints
m = mub+meq
# The number of slack variables (one for each of the upper-bound constraints)
n_slack = mub
# The number of artificial variables (one for each lower-bound and equality
# constraint)
n_artificial = meq + np.count_nonzero(bub < 0)
try:
Aub_rows, Aub_cols = Aub.shape
except ValueError:
raise ValueError("Invalid input. A_ub must be two-dimensional")
try:
Aeq_rows, Aeq_cols = Aeq.shape
except ValueError:
raise ValueError("Invalid input. A_eq must be two-dimensional")
if Aeq_rows != meq:
status = -1
message = ("Invalid input for linprog with method = 'simplex'. "
"The number of rows in A_eq must be equal "
"to the number of values in b_eq")
if Aub_rows != mub:
status = -1
message = ("Invalid input for linprog with method = 'simplex'. "
"The number of rows in A_ub must be equal "
"to the number of values in b_ub")
if Aeq_cols > 0 and Aeq_cols != n:
status = -1
message = ("Invalid input for linprog with method = 'simplex'. "
"Number of columns in A_eq must be equal "
"to the size of c")
if Aub_cols > 0 and Aub_cols != n:
status = -1
message = ("Invalid input for linprog with method = 'simplex'. "
"Number of columns in A_ub must be equal to the size of c")
if status != 0:
# Invalid inputs provided
raise ValueError(message)
# Create the tableau
T = np.zeros([m+2, n+n_slack+n_artificial+1])
# Insert objective into tableau
T[-2, :n] = cc
T[-2, -1] = f0
b = T[:-2, -1]
if meq > 0:
# Add Aeq to the tableau
T[:meq, :n] = Aeq
# Add beq to the tableau
b[:meq] = beq
if mub > 0:
# Add Aub to the tableau
T[meq:meq+mub, :n] = Aub
# At bub to the tableau
b[meq:meq+mub] = bub
# Add the slack variables to the tableau
np.fill_diagonal(T[meq:m, n:n+n_slack], 1)
# Further set up the tableau.
# If a row corresponds to an equality constraint or a negative b (a lower
# bound constraint), then an artificial variable is added for that row.
# Also, if b is negative, first flip the signs in that constraint.
slcount = 0
avcount = 0
basis = np.zeros(m, dtype=int)
r_artificial = np.zeros(n_artificial, dtype=int)
for i in range(m):
if i < meq or b[i] < 0:
# basic variable i is in column n+n_slack+avcount
basis[i] = n+n_slack+avcount
r_artificial[avcount] = i
avcount += 1
if b[i] < 0:
b[i] *= -1
T[i, :-1] *= -1
T[i, basis[i]] = 1
T[-1, basis[i]] = 1
else:
# basic variable i is in column n+slcount
basis[i] = n+slcount
slcount += 1
# Make the artificial variables basic feasible variables by subtracting
# each row with an artificial variable from the Phase 1 objective
for r in r_artificial:
T[-1, :] = T[-1, :] - T[r, :]
nit1, status = _solve_simplex(T, n, basis, phase=1, callback=callback,
maxiter=maxiter, tol=tol, bland=bland)
# if pseudo objective is zero, remove the last row from the tableau and
# proceed to phase 2
if abs(T[-1, -1]) < tol:
# Remove the pseudo-objective row from the tableau
T = T[:-1, :]
# Remove the artificial variable columns from the tableau
T = np.delete(T, np.s_[n+n_slack:n+n_slack+n_artificial], 1)
else:
# Failure to find a feasible starting point
status = 2
if status != 0:
message = messages[status]
if disp:
print(message)
return OptimizeResult(x=np.nan, fun=-T[-1, -1], nit=nit1, status=status,
message=message, success=False)
# Phase 2
nit2, status = _solve_simplex(T, n, basis, maxiter=maxiter-nit1, phase=2,
callback=callback, tol=tol, nit0=nit1,
bland=bland)
solution = np.zeros(n+n_slack+n_artificial)
solution[basis[:m]] = T[:m, -1]
x = solution[:n]
slack = solution[n:n+n_slack]
# For those variables with finite negative lower bounds,
# reverse the change of variables
masked_L = np.ma.array(L, mask=np.isinf(L), fill_value=0.0).filled()
x = x + masked_L
# For those variables with infinite negative lower bounds,
# take x[i] as the difference between x[i] and the floor variable.
if have_floor_variable:
for i in range(1, n):
if np.isinf(L[i]):
x[i] -= x[0]
x = x[1:]
# Optimization complete at this point
obj = -T[-1, -1]
if status in (0, 1):
if disp:
print(messages[status])
print(" Current function value: {: <12.6f}".format(obj))
print(" Iterations: {:d}".format(nit2))
else:
if disp:
print(messages[status])
print(" Iterations: {:d}".format(nit2))
return OptimizeResult(x=x, fun=obj, nit=int(nit2), status=status, slack=slack,
message=messages[status], success=(status == 0))
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='simplex', callback=None,
options=None):
"""
Minimize a linear objective function subject to linear
equality and inequality constraints.
Linear Programming is intended to solve the following problem form:
Minimize: c^T * x
Subject to: A_ub * x <= b_ub
A_eq * x == b_eq
Parameters
----------
c : array_like
Coefficients of the linear objective function to be minimized.
A_ub : array_like, optional
2-D array which, when matrix-multiplied by x, gives the values of the
upper-bound inequality constraints at x.
b_ub : array_like, optional
1-D array of values representing the upper-bound of each inequality
constraint (row) in A_ub.
A_eq : array_like, optional
2-D array which, when matrix-multiplied by x, gives the values of the
equality constraints at x.
b_eq : array_like, optional
1-D array of values representing the RHS of each equality constraint
(row) in A_eq.
bounds : sequence, optional
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None for one of ``min`` or
``max`` when there is no bound in that direction. By default
bounds are ``(0, None)`` (non-negative)
If a sequence containing a single tuple is provided, then ``min`` and
``max`` will be applied to all variables in the problem.
method : str, optional
Type of solver. At this time only 'simplex' is supported
:ref:`(see here) <optimize.linprog-simplex>`.
callback : callable, optional
If a callback function is provide, it will be called within each
iteration of the simplex algorithm. The callback must have the signature
`callback(xk, **kwargs)` where xk is the current solution vector
and kwargs is a dictionary containing the following::
"tableau" : The current Simplex algorithm tableau
"nit" : The current iteration.
"pivot" : The pivot (row, column) used for the next iteration.
"phase" : Whether the algorithm is in Phase 1 or Phase 2.
"basis" : The indices of the columns of the basic variables.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see `show_options('linprog')`.
Returns
-------
A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : ndarray
The independent variable vector which optimizes the linear
programming problem.
slack : ndarray
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
success : bool
Returns True if the algorithm succeeded in finding an optimal
solution.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
See Also
--------
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is :ref:`Simplex <optimize.linprog-simplex>`.
Method *Simplex* uses the Simplex algorithm (as it relates to Linear
Programming, NOT the Nelder-Mead Simplex) [1]_, [2]_. This algorithm
should be reasonably reliable and fast.
.. versionadded:: 0.15.0
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
Examples
--------
Consider the following problem:
Minimize: f = -1*x[0] + 4*x[1]
Subject to: -3*x[0] + 1*x[1] <= 6
1*x[0] + 2*x[1] <= 4
x[1] >= -3
where: -inf <= x[0] <= inf
This problem deviates from the standard linear programming problem.
In standard form, linear programming problems assume the variables x are
non-negative. Since the variables don't have standard bounds where
0 <= x <= inf, the bounds of the variables must be explicitly set.
There are two upper-bound constraints, which can be expressed as
dot(A_ub, x) <= b_ub
The input for this problem is as follows:
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> from scipy.optimize import linprog
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
... options={"disp": True})
Optimization terminated successfully.
Current function value: -22.000000
Iterations: 1
>>> print(res)
status: 0
slack: array([ 39., 0.])
success: True
fun: -22.0
x: array([ 10., -3.])
message: 'Optimization terminated successfully.'
nit: 1
Note the actual objective value is 11.428571. In this case we minimized
the negative of the objective function.
"""
meth = method.lower()
if options is None:
options = {}
if meth == 'simplex':
return _linprog_simplex(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, callback=callback, **options)
else:
raise ValueError('Unknown solver %s' % method)
|
zingale/pyro2
|
refs/heads/master
|
compressible/problems/advect.py
|
2
|
from __future__ import print_function
import sys
import mesh.patch as patch
import numpy as np
from util import msg
def init_data(my_data, rp):
""" initialize a smooth advection problem for testing convergence """
msg.bold("initializing the advect problem...")
# make sure that we are passed a valid patch object
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in advect.py")
print(my_data.__class__)
sys.exit()
# get the density, momenta, and energy as separate variables
dens = my_data.get_var("density")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
ener = my_data.get_var("energy")
# initialize the components, remember, that ener here is rho*eint
# + 0.5*rho*v**2, where eint is the specific internal energy
# (erg/g)
dens[:, :] = 1.0
xmom[:, :] = 0.0
ymom[:, :] = 0.0
gamma = rp.get_param("eos.gamma")
xmin = rp.get_param("mesh.xmin")
xmax = rp.get_param("mesh.xmax")
ymin = rp.get_param("mesh.ymin")
ymax = rp.get_param("mesh.ymax")
xctr = 0.5*(xmin + xmax)
yctr = 0.5*(ymin + ymax)
# this is identical to the advection/smooth problem
dens[:, :] = 1.0 + np.exp(-60.0*((my_data.grid.x2d-xctr)**2 +
(my_data.grid.y2d-yctr)**2))
# velocity is diagonal
u = 1.0
v = 1.0
xmom[:, :] = dens[:, :]*u
ymom[:, :] = dens[:, :]*v
# pressure is constant
p = 1.0
ener[:, :] = p/(gamma - 1.0) + 0.5*(xmom[:, :]**2 + ymom[:, :]**2)/dens[:, :]
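    # Consistency check (an illustrative sketch, not part of the original
    # problem setup): inverting the gamma-law EOS should recover the
    # constant pressure we just set.
    p_check = (gamma - 1.0)*(ener[:, :] -
                             0.5*(xmom[:, :]**2 + ymom[:, :]**2)/dens[:, :])
    assert np.allclose(p_check, p)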
def finalize():
""" print out any information to the user at the end of the run """
msg = """
"""
print(msg)
|
rooshilp/CMPUT410W15-project
|
refs/heads/master
|
testenv/lib/python2.7/site-packages/PIL/IcnsImagePlugin.py
|
26
|
#
# The Python Imaging Library.
# $Id$
#
# Mac OS X icns file decoder, based on icns.py by Bob Ippolito.
#
# history:
# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies.
#
# Copyright (c) 2004 by Bob Ippolito.
# Copyright (c) 2004 by Secret Labs.
# Copyright (c) 2004 by Fredrik Lundh.
# Copyright (c) 2014 by Alastair Houghton.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFile, PngImagePlugin, _binary
import io
import struct
enable_jpeg2k = hasattr(Image.core, 'jp2klib_version')
if enable_jpeg2k:
from PIL import Jpeg2KImagePlugin
i8 = _binary.i8
HEADERSIZE = 8
def nextheader(fobj):
return struct.unpack('>4sI', fobj.read(HEADERSIZE))
def read_32t(fobj, start_length, size):
# The 128x128 icon seems to have an extra header for some reason.
(start, length) = start_length
fobj.seek(start)
sig = fobj.read(4)
if sig != b'\x00\x00\x00\x00':
raise SyntaxError('Unknown signature, expecting 0x00000000')
return read_32(fobj, (start + 4, length - 4), size)
def read_32(fobj, start_length, size):
"""
Read a 32bit RGB icon resource. Seems to be either uncompressed or
an RLE packbits-like scheme.
"""
(start, length) = start_length
fobj.seek(start)
pixel_size = (size[0] * size[2], size[1] * size[2])
sizesq = pixel_size[0] * pixel_size[1]
if length == sizesq * 3:
# uncompressed ("RGBRGBGB")
indata = fobj.read(length)
im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
else:
# decode image
im = Image.new("RGB", pixel_size, None)
for band_ix in range(3):
data = []
bytesleft = sizesq
while bytesleft > 0:
byte = fobj.read(1)
if not byte:
break
byte = i8(byte)
                if byte & 0x80:
                    # RLE packet: the next byte is repeated
                    # (byte - 125) times
                    blocksize = byte - 125
                    byte = fobj.read(1)
                    for i in range(blocksize):
                        data.append(byte)
                else:
                    # literal packet: the next (byte + 1) bytes
                    # are copied verbatim
                    blocksize = byte + 1
                    data.append(fobj.read(blocksize))
bytesleft -= blocksize
if bytesleft <= 0:
break
if bytesleft != 0:
raise SyntaxError(
"Error reading channel [%r left]" % bytesleft
)
band = Image.frombuffer(
"L", pixel_size, b"".join(data), "raw", "L", 0, 1
)
im.im.putband(band.im, band_ix)
return {"RGB": im}
def read_mk(fobj, start_length, size):
# Alpha masks seem to be uncompressed
(start, length) = start_length
fobj.seek(start)
pixel_size = (size[0] * size[2], size[1] * size[2])
sizesq = pixel_size[0] * pixel_size[1]
band = Image.frombuffer(
"L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1
)
return {"A": band}
def read_png_or_jpeg2000(fobj, start_length, size):
(start, length) = start_length
fobj.seek(start)
sig = fobj.read(12)
if sig[:8] == b'\x89PNG\x0d\x0a\x1a\x0a':
fobj.seek(start)
im = PngImagePlugin.PngImageFile(fobj)
return {"RGBA": im}
elif sig[:4] == b'\xff\x4f\xff\x51' \
or sig[:4] == b'\x0d\x0a\x87\x0a' \
or sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a':
if not enable_jpeg2k:
raise ValueError('Unsupported icon subimage format (rebuild PIL '
'with JPEG 2000 support to fix this)')
# j2k, jpc or j2c
fobj.seek(start)
jp2kstream = fobj.read(length)
f = io.BytesIO(jp2kstream)
im = Jpeg2KImagePlugin.Jpeg2KImageFile(f)
if im.mode != 'RGBA':
im = im.convert('RGBA')
return {"RGBA": im}
else:
raise ValueError('Unsupported icon subimage format')
class IcnsFile:
SIZES = {
(512, 512, 2): [
(b'ic10', read_png_or_jpeg2000),
],
(512, 512, 1): [
(b'ic09', read_png_or_jpeg2000),
],
(256, 256, 2): [
(b'ic14', read_png_or_jpeg2000),
],
(256, 256, 1): [
(b'ic08', read_png_or_jpeg2000),
],
(128, 128, 2): [
(b'ic13', read_png_or_jpeg2000),
],
(128, 128, 1): [
(b'ic07', read_png_or_jpeg2000),
(b'it32', read_32t),
(b't8mk', read_mk),
],
(64, 64, 1): [
(b'icp6', read_png_or_jpeg2000),
],
(32, 32, 2): [
(b'ic12', read_png_or_jpeg2000),
],
(48, 48, 1): [
(b'ih32', read_32),
(b'h8mk', read_mk),
],
(32, 32, 1): [
(b'icp5', read_png_or_jpeg2000),
(b'il32', read_32),
(b'l8mk', read_mk),
],
(16, 16, 2): [
(b'ic11', read_png_or_jpeg2000),
],
(16, 16, 1): [
(b'icp4', read_png_or_jpeg2000),
(b'is32', read_32),
(b's8mk', read_mk),
],
}
def __init__(self, fobj):
"""
fobj is a file-like object as an icns resource
"""
# signature : (start, length)
self.dct = dct = {}
self.fobj = fobj
sig, filesize = nextheader(fobj)
if sig != b'icns':
raise SyntaxError('not an icns file')
i = HEADERSIZE
while i < filesize:
sig, blocksize = nextheader(fobj)
if blocksize <= 0:
raise SyntaxError('invalid block header')
            # record where this block's payload starts and how long it
            # is, then seek past the payload to the next block header
            i += HEADERSIZE
            blocksize -= HEADERSIZE
            dct[sig] = (i, blocksize)
            fobj.seek(blocksize, 1)
            i += blocksize
def itersizes(self):
sizes = []
for size, fmts in self.SIZES.items():
for (fmt, reader) in fmts:
if fmt in self.dct:
sizes.append(size)
break
return sizes
def bestsize(self):
sizes = self.itersizes()
if not sizes:
raise SyntaxError("No 32bit icon resources found")
return max(sizes)
def dataforsize(self, size):
"""
Get an icon resource as {channel: array}. Note that
the arrays are bottom-up like windows bitmaps and will likely
need to be flipped or transposed in some way.
"""
dct = {}
for code, reader in self.SIZES[size]:
desc = self.dct.get(code)
if desc is not None:
dct.update(reader(self.fobj, desc, size))
return dct
def getimage(self, size=None):
if size is None:
size = self.bestsize()
if len(size) == 2:
size = (size[0], size[1], 1)
channels = self.dataforsize(size)
im = channels.get('RGBA', None)
if im:
return im
im = channels.get("RGB").copy()
try:
im.putalpha(channels["A"])
except KeyError:
pass
return im
##
# Image plugin for Mac OS icons.
class IcnsImageFile(ImageFile.ImageFile):
"""
PIL read-only image support for Mac OS .icns files.
Chooses the best resolution, but will possibly load
a different size image if you mutate the size attribute
before calling 'load'.
The info dictionary has a key 'sizes' that is a list
of sizes that the icns file has.
"""
format = "ICNS"
format_description = "Mac OS icns resource"
def _open(self):
self.icns = IcnsFile(self.fp)
self.mode = 'RGBA'
self.best_size = self.icns.bestsize()
self.size = (self.best_size[0] * self.best_size[2],
self.best_size[1] * self.best_size[2])
self.info['sizes'] = self.icns.itersizes()
# Just use this to see if it's loaded or not yet.
self.tile = ('',)
def load(self):
if len(self.size) == 3:
self.best_size = self.size
self.size = (self.best_size[0] * self.best_size[2],
self.best_size[1] * self.best_size[2])
Image.Image.load(self)
if not self.tile:
return
self.load_prepare()
# This is likely NOT the best way to do it, but whatever.
im = self.icns.getimage(self.best_size)
# If this is a PNG or JPEG 2000, it won't be loaded yet
im.load()
self.im = im.im
self.mode = im.mode
self.size = im.size
self.fp = None
self.icns = None
self.tile = ()
self.load_end()
Image.register_open("ICNS", IcnsImageFile, lambda x: x[:4] == b'icns')
Image.register_extension("ICNS", '.icns')
if __name__ == '__main__':
import os
import sys
imf = IcnsImageFile(open(sys.argv[1], 'rb'))
for size in imf.info['sizes']:
imf.size = size
imf.load()
im = imf.im
im.save('out-%s-%s-%s.png' % size)
im = Image.open(open(sys.argv[1], "rb"))
im.save("out.png")
    if sys.platform == 'win32':  # 'windows' never matches; sys.platform is 'win32' on Windows
os.startfile("out.png")
|
sputnick-dev/weboob
|
refs/heads/master
|
modules/btdigg/__init__.py
|
7
|
from .module import BTDiggModule
__all__ = ['BTDiggModule']
|
40223240/2015cdb_w17_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/test/re_tests.py
|
879
|
#!/usr/bin/env python3
# -*- mode: python -*-
# Re test suite and benchmark suite v1.5
# The 3 possible outcomes for each pattern
[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
benchmarks = [
# test common prefix
('Python|Perl', 'Perl'), # Alternation
('(Python|Perl)', 'Perl'), # Grouped alternation
('Python|Perl|Tcl', 'Perl'), # Alternation
('(Python|Perl|Tcl)', 'Perl'), # Grouped alternation
('(Python)\\1', 'PythonPython'), # Backreference
('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # Disable the fastmap optimization
('([a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # A few sets
('Python', 'Python'), # Simple text literal
('.*Python', 'Python'), # Bad text literal
('.*Python.*', 'Python'), # Worse text literal
('.*(Python)', 'Python'), # Bad text literal with grouping
]
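# A sketch of the padding scheme described above (hypothetical helper; the
# original benchmark driver is not part of this file):
def _bench_one(pattern, target, paddings=(10, 100, 1000)):
    import re
    import time
    for n in paddings:
        haystack = '-' * n + target + '-' * n
        t0 = time.time()
        re.search(pattern, haystack)
        print(n, time.time() - t0)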
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g99" contain the contents of each group, or the
# string 'None' if the group wasn't given a value, or the
# string 'Error' if the group index was out of range;
# also "groups", the return value of m.group() (a tuple).
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
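# A minimal sketch of how one test tuple could be checked (hypothetical
# helper; the real driver is CPython's test_re.py, which also handles
# g1..g99 and the 'Error' placeholder for out-of-range groups):
def _check_one(t):
    import re
    pattern, s, outcome = t[0], t[1], t[2]
    try:
        obj = re.compile(pattern)
    except re.error:
        return outcome == SYNTAX_ERROR
    result = obj.search(s)
    if result is None:
        return outcome == FAIL
    if outcome != SUCCEED:
        return False
    if len(t) == 3:
        return True
    vardict = {'found': result.group(0)}
    for i, g in enumerate(result.groups()):
        vardict['g%d' % (i + 1)] = 'None' if g is None else g
    return eval(t[3], vardict) == t[4]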
tests = [
# Test ?P< and ?P= extensions
('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier
('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit
('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
# Same tests, for the ?P= form
('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group
('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
# Test octal escapes
('\\1', 'a', SYNTAX_ERROR), # Backreference
('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# Test \0 is handled everywhere
(r'\0', '\0', SUCCEED, 'found', '\0'),
(r'[\0a]', '\0', SUCCEED, 'found', '\0'),
(r'[a\0]', '\0', SUCCEED, 'found', '\0'),
(r'[^a\0]', '\0', FAIL),
# Test various letter escapes
(r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
(r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
# NOTE: not an error under PCRE/PRE:
# (r'\u', '', SYNTAX_ERROR), # A Perl escape
(r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ffffffffffffff', '\377', FAIL, 'found', chr(255)),
(r'\x00f', '\017', FAIL, 'found', chr(15)),
(r'\x00fe', '\376', FAIL, 'found', chr(254)),
# (r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
# (r'\x00f', '\017', SUCCEED, 'found', chr(15)),
# (r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
(r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
# Test that . only matches \n in DOTALL mode
('a.b', 'acb', SUCCEED, 'found', 'acb'),
('a.b', 'a\nb', FAIL),
('a.*b', 'acc\nccb', FAIL),
('a.{4,5}b', 'acc\nccb', FAIL),
('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
(')', '', SYNTAX_ERROR), # Unmatched right bracket
('', '', SUCCEED, 'found', ''), # Empty pattern
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found+"-"', '-'),
('$', 'abc', SUCCEED, 'found+"-"', '-'),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
# NOTE: not an error under PCRE/PRE:
# ('a[b-]', 'a-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('x\\b', 'xyz', FAIL),
('x\\B', 'xyz', SUCCEED, '"-"', '-'),
('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
('z\\B', 'xyz', FAIL),
('\\Bx', 'xyz', FAIL),
('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
('\\Ba\\B', '-a', FAIL, '"-"', '-'),
('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
('\\By\\B', 'xy', FAIL),
('\\By\\B', 'yz', FAIL),
('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
('^(a+).\\1$', 'aaaa', FAIL),
('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
('([abc]*)x', 'abc', FAIL),
('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
# Test symbolic groups
('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
# Test octal escapes/memory references
('\\1', 'a', SYNTAX_ERROR),
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# All tests from Perl
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab{1,}bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{4,5}bc', 'abbbbc', FAIL),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found', ''),
('$', 'abc', SUCCEED, 'found', ''),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-a]', '-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('*a', '-', SYNTAX_ERROR),
('(*)b', '-', SYNTAX_ERROR),
('$b', 'b', FAIL),
('a\\', '-', SYNTAX_ERROR),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a**', '-', SYNTAX_ERROR),
('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
# Python does not have the same rules for \\41 so this is a syntax error
# ('((((((((((a))))))))))\\41', 'aa', FAIL),
# ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'XBC', FAIL),
('(?i)abc', 'AXC', FAIL),
('(?i)abc', 'ABX', FAIL),
('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab+bc', 'ABC', FAIL),
('(?i)ab+bc', 'ABQ', FAIL),
('(?i)ab{1,}bc', 'ABQ', FAIL),
('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab??bc', 'ABBBBC', FAIL),
('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABCC', FAIL),
('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'AABC', FAIL),
('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
('(?i)^', 'ABC', SUCCEED, 'found', ''),
('(?i)$', 'ABC', SUCCEED, 'found', ''),
('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
('(?i)a.*c', 'AXYZD', FAIL),
('(?i)a[bc]d', 'ABC', FAIL),
('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
('(?i)a[b-d]e', 'ABD', FAIL),
('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-a]', '-', SYNTAX_ERROR),
('(?i)a[]b', '-', SYNTAX_ERROR),
('(?i)a[', '-', SYNTAX_ERROR),
('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
('(?i)a[^bc]d', 'ABD', FAIL),
('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)a[^-b]c', 'A-C', FAIL),
('(?i)a[^]b]c', 'A]C', FAIL),
('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)*a', '-', SYNTAX_ERROR),
('(?i)(*)b', '-', SYNTAX_ERROR),
('(?i)$b', 'B', FAIL),
('(?i)a\\', '-', SYNTAX_ERROR),
('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
('(?i)abc)', '-', SYNTAX_ERROR),
('(?i)(abc', '-', SYNTAX_ERROR),
('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a**', '-', SYNTAX_ERROR),
('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
('(?i))(', '-', SYNTAX_ERROR),
('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
('(?i)abc', '', FAIL),
('(?i)a*', '', SUCCEED, 'found', ''),
('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
('(?i)^(ab|cd)e', 'ABCDE', FAIL),
('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
#('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
#('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
('(?i)multiple words of text', 'UH-UH', FAIL),
('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
('(?i)[k]', 'AB', FAIL),
# ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
# ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
# lookbehind: split by : but not if it is escaped by -.
('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', SUCCEED, 'g1', 'bc-:de' ),
# escaping with \ as we know it
('(?<!\\\):(.*?)(?<!\\\):', 'a:bc\\:de:f', SUCCEED, 'g1', 'bc\\:de' ),
# terminating with ' and escaping with ? as in edifact
("(?<!\\?)'(.*?)(?<!\\?)'", "a'bc?'de'f", SUCCEED, 'g1', "bc?'de" ),
# Comments using the (?#...) syntax
('w(?# comment', 'w', SYNTAX_ERROR),
('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
# Check odd placement of embedded pattern modifiers
# not an error under PCRE/PRE:
('w(?i)', 'W', SUCCEED, 'found', 'W'),
# ('w(?i)', 'W', SYNTAX_ERROR),
# Comments using the x embedded pattern modifier
("""(?x)w# comment 1
x y
# comment 2
z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
# using the m embedded pattern modifier
('^abc', """jkl
abc
xyz""", FAIL),
('(?m)^abc', """jkl
abc
xyz""", SUCCEED, 'found', 'abc'),
('(?m)abc$', """jkl
xyzabc
123""", SUCCEED, 'found', 'abc'),
# using the s embedded pattern modifier
('a.b', 'a\nb', FAIL),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
# test \w, etc. both inside and outside character classes
('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
# not an error under PCRE/PRE:
# ('[\\d-x]', '-', SYNTAX_ERROR),
(r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ff', '\377', FAIL),
# (r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
(r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
(r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
(r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
#
# post-1.5.2 additions
# xmllib problem
(r'(([a-z]+):)?([a-z]+)$', 'smil', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-smil'),
# bug 110866: reference to undefined group
(r'((.)\1+)', '', SYNTAX_ERROR),
# bug 111869: search (PRE/PCRE fails on this one, SRE doesn't)
(r'.*d', 'abc\nabd', SUCCEED, 'found', 'abd'),
# bug 112468: various expected syntax errors
(r'(', '', SYNTAX_ERROR),
(r'[\41]', '!', SUCCEED, 'found', '!'),
# bug 114033: nothing to repeat
(r'(x?)?', 'x', SUCCEED, 'found', 'x'),
# bug 115040: rescan if flags are modified inside pattern
(r' (?x)foo ', 'foo', SUCCEED, 'found', 'foo'),
# bug 115618: negative lookahead
(r'(?<!abc)(d.f)', 'abcdefdof', SUCCEED, 'found', 'dof'),
# bug 116251: character class bug
(r'[\w-]+', 'laser_beam', SUCCEED, 'found', 'laser_beam'),
# bug 123769+127259: non-greedy backtracking bug
(r'.*?\S *:', 'xx:', SUCCEED, 'found', 'xx:'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
# bug 127259: \Z shouldn't depend on multiline mode
(r'(?ms).*?x\s*\Z(.*)','xx\nx\n', SUCCEED, 'g1', ''),
# bug 128899: uppercase literals under the ignorecase flag
(r'(?i)M+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)m+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[M]+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[m]+', 'MMM', SUCCEED, 'found', 'MMM'),
# bug 130748: ^* should be an error (nothing to repeat)
(r'^*', '', SYNTAX_ERROR),
# bug 133283: minimizing repeat problem
(r'"(?:\\"|[^"])*?"', r'"\""', SUCCEED, 'found', r'"\""'),
# bug 477728: minimizing repeat problem
(r'^.*?$', 'one\ntwo\nthree\n', FAIL),
# bug 483789: minimizing repeat problem
(r'a[^>]*?b', 'a>b', FAIL),
# bug 490573: minimizing repeat problem
(r'^a*?$', 'foo', FAIL),
# bug 470582: nested groups problem
(r'^((a)c)?(ab)$', 'ab', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-ab'),
# another minimizing repeat problem (capturing groups in assertions)
('^([ab]*?)(?=(b)?)c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?!(b))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?<!(a))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
]
u = '\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'
tests.extend([
# bug 410271: \b broken under locales
(r'\b.\b', 'a', SUCCEED, 'found', 'a'),
(r'(?u)\b.\b', u, SUCCEED, 'found', u),
(r'(?u)\w', u, SUCCEED, 'found', u),
])
|
matsumoto-r/synciga
|
refs/heads/master
|
src/third_party/yasm/source/patched-yasm/modules/arch/x86/tests/gen-fma-test.py
|
28
|
#!/usr/bin/env python
def emit(opcode, suffix, width, order, optype):
    """Print one NASM-syntax FMA instruction line for the given combination."""
d = {}
d['opcode']=opcode
d['suffix']=suffix
d['order']=order
if width == 128:
d['op1']= 'xmm1'
d['op2']= 'xmm2'
d['op3']= 'xmm3'
if optype == 'rrr':
d['op3']= 'xmm3'
elif suffix == 'pd':
d['op3']= 'dqword [rax]'
elif suffix == 'sd':
d['op3']= 'qword [rax]'
elif suffix == 'ss':
d['op3']= 'dword [rax]'
else:
d['op1']= 'ymm1'
d['op2']= 'ymm2'
if optype == 'rrr':
d['op3']= 'ymm3'
else:
d['op3']= 'yword [rax]'
print "v%(opcode)s%(order)s%(suffix)s %(op1)s, %(op2)s, %(op3)s" % (d)
if optype == 'rrm':
d['op3']= '[rax]'
print "v%(opcode)s%(order)s%(suffix)s %(op1)s, %(op2)s, %(op3)s" % (d)
def gen(opcodes, combos, optypes, orders):
for opcode in opcodes:
for (suffix,width) in combos:
for order in orders:
for optype in optypes:
emit(opcode,suffix,width,order,optype)
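# For example, emit('fmadd', 'ss', 128, '132', 'rrr') prints:
#   vfmadd132ss xmm1, xmm2, xmm3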
if __name__ == '__main__':
orders = ['132', '231', '213']
all_combos = [('ss',128),
('sd',128),
('ps',128),
('ps',256),
('pd',128),
('pd',256) ]
packed_combos = [ ('ps',128),
('ps',256),
('pd',128),
('pd',256) ]
opcodes1 = ['fmadd', 'fmsub', 'fnmadd', 'fnmsub']
opcodes2 = ['fmaddsub', 'fmsubadd']
optypes = ['rrr','rrm']
print "[bits 64]"
gen(opcodes1, all_combos,optypes, orders)
gen(opcodes2, packed_combos,optypes, orders)
|
holmes/intellij-community
|
refs/heads/master
|
python/testData/mover/simpleBlankLines.py
|
83
|
if a:
a = 1
b =<caret> 2
c = 3
|
ArcherSys/ArcherSys
|
refs/heads/master
|
Lib/test/test_source_encoding.py
|
1
|
# -*- coding: koi8-r -*-
import unittest
from test.support import TESTFN, unlink, unload, rmtree
import importlib
import os
import sys
import subprocess
class SourceEncodingTest(unittest.TestCase):
def test_pep263(self):
self.assertEqual(
"ðÉÔÏÎ".encode("utf-8"),
b'\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
)
self.assertEqual(
"\ð".encode("utf-8"),
b'\\\xd0\x9f'
)
def test_compilestring(self):
# see #1882
c = compile(b"\n# coding: utf-8\nu = '\xc3\xb3'\n", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['u'], '\xf3')
def test_issue2301(self):
try:
compile(b"# coding: cp932\nprint '\x94\x4e'", "dummy", "exec")
except SyntaxError as v:
self.assertEqual(v.text, "print '\u5e74'\n")
else:
self.fail()
def test_issue4626(self):
c = compile("# coding=latin-1\n\u00c6 = '\u00c6'", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['\xc6'], '\xc6')
def test_issue3297(self):
c = compile("a, b = '\U0001010F', '\\U0001010F'", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['a'], d['b'])
self.assertEqual(len(d['a']), len(d['b']))
self.assertEqual(ascii(d['a']), ascii(d['b']))
def test_issue7820(self):
# Ensure that check_bom() restores all bytes in the right order if
# check_bom() fails in pydebug mode: a buffer starts with the first
# byte of a valid BOM, but next bytes are different
# one byte in common with the UTF-16-LE BOM
self.assertRaises(SyntaxError, eval, b'\xff\x20')
# two bytes in common with the UTF-8 BOM
self.assertRaises(SyntaxError, eval, b'\xef\xbb\x20')
def test_20731(self):
sub = subprocess.Popen([sys.executable,
os.path.join(os.path.dirname(__file__),
'coding20731.py')],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertEqual(sub.returncode, 0)
self.assertNotIn(b'SyntaxError', err)
def test_error_message(self):
compile(b'# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
compile(b'\xef\xbb\xbf\n', 'dummy', 'exec')
compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec')
with self.assertRaisesRegex(SyntaxError, 'fake'):
compile(b'# -*- coding: fake -*-\n', 'dummy', 'exec')
with self.assertRaisesRegex(SyntaxError, 'iso-8859-15'):
compile(b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n',
'dummy', 'exec')
with self.assertRaisesRegex(SyntaxError, 'BOM'):
compile(b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n',
'dummy', 'exec')
with self.assertRaisesRegex(SyntaxError, 'fake'):
compile(b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
with self.assertRaisesRegex(SyntaxError, 'BOM'):
compile(b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
def test_bad_coding(self):
module_name = 'bad_coding'
self.verify_bad_module(module_name)
def test_bad_coding2(self):
module_name = 'bad_coding2'
self.verify_bad_module(module_name)
def verify_bad_module(self, module_name):
self.assertRaises(SyntaxError, __import__, 'test.' + module_name)
path = os.path.dirname(__file__)
filename = os.path.join(path, module_name + '.py')
with open(filename, "rb") as fp:
bytes = fp.read()
self.assertRaises(SyntaxError, compile, bytes, filename, 'exec')
def test_exec_valid_coding(self):
d = {}
exec(b'# coding: cp949\na = "\xaa\xa7"\n', d)
self.assertEqual(d['a'], '\u3047')
def test_file_parse(self):
# issue1134: all encodings outside latin-1 and utf-8 fail on
# multiline strings and long lines (>512 columns)
unload(TESTFN)
filename = TESTFN + ".py"
f = open(filename, "w", encoding="cp1252")
sys.path.insert(0, os.curdir)
try:
with f:
f.write("# -*- coding: cp1252 -*-\n")
f.write("'''A short string\n")
f.write("'''\n")
f.write("'A very long string %s'\n" % ("X" * 1000))
importlib.invalidate_caches()
__import__(TESTFN)
finally:
del sys.path[0]
unlink(filename)
unlink(filename + "c")
unlink(filename + "o")
unload(TESTFN)
rmtree('__pycache__')
def test_error_from_string(self):
# See http://bugs.python.org/issue6289
input = "# coding: ascii\n\N{SNOWMAN}".encode('utf-8')
with self.assertRaises(SyntaxError) as c:
compile(input, "<string>", "exec")
expected = "'ascii' codec can't decode byte 0xe2 in position 16: " \
"ordinal not in range(128)"
self.assertTrue(c.exception.args[0].startswith(expected),
msg=c.exception.args[0])
if __name__ == "__main__":
unittest.main()
|
tiagofrepereira2012/tensorflow
|
refs/heads/master
|
tensorflow/contrib/tensor_forest/client/random_forest.py
|
1
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.learn implementation of tensor_forest (extremely random forests)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import session_run_hook
KEYS_NAME = 'keys'
LOSS_NAME = 'rf_training_loss'
TREE_PATHS_PREDICTION_KEY = 'tree_paths'
VARIANCE_PREDICTION_KEY = 'regression_variance'
EPSILON = 0.000001
def _assert_float32(tensors):
"""Assert all tensors are float32.
Args:
tensors: `Tensor` or `dict` of `Tensor` objects.
Raises:
TypeError: if any tensor is not float32.
"""
if not isinstance(tensors, dict):
tensors = [tensors]
else:
tensors = tensors.values()
for tensor in tensors:
if tensor.dtype.base_dtype != dtypes.float32:
      raise TypeError('Expected dtype=float32, got %s.' % tensor)
class TensorForestRunOpAtEndHook(session_run_hook.SessionRunHook):
def __init__(self, op_dict):
"""Ops is a dict of {name: op} to run before the session is destroyed."""
self._ops = op_dict
def end(self, session):
for name in sorted(self._ops.keys()):
logging.info('{0}: {1}'.format(name, session.run(self._ops[name])))
class TensorForestLossHook(session_run_hook.SessionRunHook):
"""Monitor to request stop when loss stops decreasing."""
def __init__(self,
early_stopping_rounds,
early_stopping_loss_threshold=None,
loss_op=None):
self.early_stopping_rounds = early_stopping_rounds
self.early_stopping_loss_threshold = early_stopping_loss_threshold
self.loss_op = loss_op
self.min_loss = None
self.last_step = -1
# self.steps records the number of steps for which the loss has been
# non-decreasing
self.steps = 0
def before_run(self, run_context):
loss = (self.loss_op if self.loss_op is not None else
run_context.session.graph.get_operation_by_name(
LOSS_NAME).outputs[0])
return session_run_hook.SessionRunArgs(
{'global_step': contrib_framework.get_global_step(),
'current_loss': loss})
def after_run(self, run_context, run_values):
current_loss = run_values.results['current_loss']
current_step = run_values.results['global_step']
self.steps += 1
# Guard against the global step going backwards, which might happen
# if we recover from something.
if self.last_step == -1 or self.last_step > current_step:
logging.info('TensorForestLossHook resetting last_step.')
self.last_step = current_step
self.steps = 0
self.min_loss = None
return
self.last_step = current_step
if (self.min_loss is None or current_loss <
(self.min_loss - self.min_loss * self.early_stopping_loss_threshold)):
self.min_loss = current_loss
self.steps = 0
if self.steps > self.early_stopping_rounds:
logging.info('TensorForestLossHook requesting stop.')
run_context.request_stop()
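
  # Illustrative reading of the rule above (not new behavior): with the
  # defaults used by get_model_fn below (early_stopping_rounds=100,
  # early_stopping_loss_threshold=0.001), training stops once the loss
  # has failed to improve by at least 0.1% of the best loss seen for more
  # than 100 consecutive steps.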
class EveryCheckpointPreSaveListener(
basic_session_run_hooks.CheckpointSaverListener):
"""Runs a given op before each checkpoint save."""
def __init__(self, op):
"""Initializes the object.
Args:
op: An op to run before each checkpoint save.
"""
self._op = op
def before_save(self, session, global_step_value):
session.run(self._op)
def get_default_head(params, weights_name, name=None):
if params.regression:
return head_lib.regression_head(
weight_column_name=weights_name,
label_dimension=params.num_outputs,
enable_centered_bias=False,
head_name=name)
else:
return head_lib.multi_class_head(
params.num_classes,
weight_column_name=weights_name,
enable_centered_bias=False,
head_name=name)
def get_model_fn(params,
graph_builder_class,
device_assigner,
weights_name=None,
model_head=None,
keys_name=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
head_scope=None):
"""Return a model function given a way to construct a graph builder."""
if model_head is None:
model_head = get_default_head(params, weights_name)
def _model_fn(features, labels, mode):
"""Function that returns predictions, training loss, and training op."""
if (isinstance(features, ops.Tensor) or
isinstance(features, sparse_tensor.SparseTensor)):
features = {'features': features}
weights = None
if weights_name and weights_name in features:
weights = features.pop(weights_name)
keys = None
if keys_name and keys_name in features:
keys = features.pop(keys_name)
# If we're doing eval, optionally ignore device_assigner.
# Also ignore device assigner if we're exporting (mode == INFER)
dev_assn = device_assigner
if (mode == model_fn_lib.ModeKeys.INFER or
(local_eval and mode == model_fn_lib.ModeKeys.EVAL)):
dev_assn = None
graph_builder = graph_builder_class(params,
device_assigner=dev_assn)
logits, tree_paths, regression_variance = graph_builder.inference_graph(
features)
summary.scalar('average_tree_size', graph_builder.average_size())
# For binary classification problems, convert probabilities to logits.
# Includes hack to get around the fact that a probability might be 0 or 1.
if not params.regression and params.num_classes == 2:
class_1_probs = array_ops.slice(logits, [0, 1], [-1, 1])
logits = math_ops.log(
math_ops.maximum(class_1_probs / math_ops.maximum(
1.0 - class_1_probs, EPSILON), EPSILON))
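    # Worked example: a class-1 probability of 0.8 becomes
    # log(0.8 / 0.2) = log(4) ~= 1.386. At p = 1.0 the denominator would be
    # zero, so both numerator and denominator are clamped at EPSILON, bounding
    # the logit at roughly +/- log(1 / EPSILON) ~= 13.8.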
# labels might be None if we're doing prediction (which brings up the
# question of why we force everything to adhere to a single model_fn).
training_graph = None
training_hooks = []
if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
with ops.control_dependencies([logits.op]):
training_graph = control_flow_ops.group(
graph_builder.training_graph(
features, labels, input_weights=weights,
num_trainers=num_trainers,
trainer_id=trainer_id),
state_ops.assign_add(contrib_framework.get_global_step(), 1))
# Put weights back in
if weights is not None:
features[weights_name] = weights
# TensorForest's training graph isn't calculated directly from the loss
# like many other models.
def _train_fn(unused_loss):
return training_graph
model_ops = model_head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_fn,
logits=logits,
scope=head_scope)
    # Ops are run in lexicographic order of their keys. Run the resource
# clean-up op last.
all_handles = graph_builder.get_all_resource_handles()
ops_at_end = {
'9: clean up resources': control_flow_ops.group(
*[resource_variable_ops.destroy_resource_op(handle)
for handle in all_handles])}
if report_feature_importances:
ops_at_end['1: feature_importances'] = (
graph_builder.feature_importances())
training_hooks.append(TensorForestRunOpAtEndHook(ops_at_end))
if early_stopping_rounds:
training_hooks.append(
TensorForestLossHook(
early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
loss_op=model_ops.loss))
model_ops.training_hooks.extend(training_hooks)
if keys is not None:
model_ops.predictions[keys_name] = keys
if params.inference_tree_paths:
model_ops.predictions[TREE_PATHS_PREDICTION_KEY] = tree_paths
if params.regression:
model_ops.predictions[VARIANCE_PREDICTION_KEY] = regression_variance
return model_ops
return _model_fn
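# Sketch (hyperparameter values are assumptions) of using get_model_fn with a
# plain Estimator rather than the TensorForestEstimator wrapper below:
#
#   params = tensor_forest.ForestHParams(num_classes=2, num_features=10).fill()
#   model_fn = get_model_fn(params, tensor_forest.RandomForestGraphs, None)
#   est = estimator.Estimator(model_fn=model_fn, model_dir='/tmp/forest')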
class TensorForestEstimator(estimator.Estimator):
"""An estimator that can train and evaluate a random forest.
Example:
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
  def input_fn_train():  # returns x, y
    ...
  def input_fn_eval():  # returns x, y
    ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
# Predict returns an iterable of dicts.
results = list(estimator.predict(x=x))
prob0 = results[0][eval_metrics.INFERENCE_PROB_NAME]
prediction0 = results[0][eval_metrics.INFERENCE_PRED_NAME]
```
"""
def __init__(self,
params,
device_assigner=None,
model_dir=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None,
weights_name=None,
keys_name=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
version=None,
head=None):
"""Initializes a TensorForestEstimator instance.
Args:
params: ForestHParams object that holds random forest hyperparameters.
These parameters will be passed into `model_fn`.
device_assigner: An `object` instance that controls how trees get
assigned to devices. If `None`, will use
`tensor_forest.RandomForestDeviceAssigner`.
model_dir: Directory to save model parameters, graph, etc. To continue
training a previously saved model, load checkpoints saved to this
directory into an estimator.
graph_builder_class: An `object` instance that defines how TF graphs for
random forest training and inference are built. By default will use
`tensor_forest.RandomForestGraphs`. Can be overridden by version
kwarg.
config: `RunConfig` object to configure the runtime settings.
weights_name: A string defining feature column name representing
weights. Will be multiplied by the loss of the example. Used to
downweight or boost examples during training.
keys_name: A string naming one of the features to strip out and
pass through into the inference/eval results dict. Useful for
associating specific examples with their prediction.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
early_stopping_rounds: Allows training to terminate early if the forest is
no longer growing. 100 by default. Set to a Falsy value to disable
the default training hook.
early_stopping_loss_threshold: Percentage (as fraction) that loss must
improve by within early_stopping_rounds steps, otherwise training will
terminate.
num_trainers: Number of training jobs, which will partition trees
among them.
trainer_id: Which trainer this instance is.
report_feature_importances: If True, print out feature importances
during evaluation.
local_eval: If True, don't use a device assigner for eval. This is to
support some common setups where eval is done on a single machine, even
though training might be distributed.
version: Unused.
head: A heads_lib.Head object that calculates losses and such. If None,
one will be automatically created based on params.
Returns:
A `TensorForestEstimator` instance.
"""
super(TensorForestEstimator, self).__init__(
model_fn=get_model_fn(
params.fill(),
graph_builder_class,
device_assigner,
model_head=head,
weights_name=weights_name,
keys_name=keys_name,
early_stopping_rounds=early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
def get_combined_model_fn(model_fns):
"""Get a combined model function given a list of other model fns.
The model function returned will call the individual model functions and
combine them appropriately. For:
training ops: tf.group them.
loss: average them.
  predictions: concat probabilities such that predictions[*][0:C1] are the
    probabilities for output 1 (where C1 is the number of classes in output 1),
    predictions[*][C1:C1+C2] are the probabilities for output 2 (where C2
    is the number of classes in output 2), etc. Also stack predictions such
    that predictions[i][j] is the class prediction for example i and output j.
This assumes that labels are 2-dimensional, with labels[i][j] being the
label for example i and output j, where forest j is trained using only
output j.
Args:
model_fns: A list of model functions obtained from get_model_fn.
  Returns:
    A combined model function that, when called, returns a `ModelFnOps`
    instance.
"""
def _model_fn(features, labels, mode):
"""Function that returns predictions, training loss, and training op."""
model_fn_ops = []
for i in range(len(model_fns)):
with variable_scope.variable_scope('label_{0}'.format(i)):
sliced_labels = array_ops.slice(labels, [0, i], [-1, 1])
model_fn_ops.append(
model_fns[i](features, sliced_labels, mode))
training_hooks = []
for mops in model_fn_ops:
training_hooks += mops.training_hooks
predictions = {}
if (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.INFER):
# Flatten the probabilities into one dimension.
predictions[eval_metrics.INFERENCE_PROB_NAME] = array_ops.concat(
[mops.predictions[eval_metrics.INFERENCE_PROB_NAME]
for mops in model_fn_ops], axis=1)
predictions[eval_metrics.INFERENCE_PRED_NAME] = array_ops.stack(
[mops.predictions[eval_metrics.INFERENCE_PRED_NAME]
for mops in model_fn_ops], axis=1)
loss = None
if (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.TRAIN):
loss = math_ops.reduce_sum(
array_ops.stack(
[mops.loss for mops in model_fn_ops])) / len(model_fn_ops)
train_op = None
if mode == model_fn_lib.ModeKeys.TRAIN:
train_op = control_flow_ops.group(
*[mops.train_op for mops in model_fn_ops])
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=training_hooks,
scaffold=None,
output_alternatives=None)
return _model_fn
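# Shape example (sizes are hypothetical): with two heads where output 1 has
# C1 = 3 classes and output 2 has C2 = 2, the concatenated INFERENCE_PROB_NAME
# tensor is [batch, 5] (columns 0:3 for output 1, 3:5 for output 2) and the
# stacked INFERENCE_PRED_NAME tensor is [batch, 2], one class id per output.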
class MultiForestMultiHeadEstimator(estimator.Estimator):
"""An estimator that can train a forest for a multi-headed problems.
This class essentially trains separate forests (each with their own
ForestHParams) for each output.
For multi-headed regression, a single-headed TensorForestEstimator can
be used to train a single model that predicts all outputs. This class can
be used to train separate forests for each output.
"""
def __init__(self, params_list, device_assigner=None, model_dir=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None, weights_name=None, keys_name=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
num_trainers=1, trainer_id=0,
report_feature_importances=False,
local_eval=False):
"""Initializes a TensorForestEstimator instance.
Args:
params_list: A list of ForestHParams objects for each head, given in order
of outputs in the label tensor to be trained on.
device_assigner: An `object` instance that controls how trees get
assigned to devices. If `None`, will use
`tensor_forest.RandomForestDeviceAssigner`.
model_dir: Directory to save model parameters, graph, etc. To continue
training a previously saved model, load checkpoints saved to this
directory into an estimator.
graph_builder_class: An `object` instance that defines how TF graphs for
random forest training and inference are built. By default will use
`tensor_forest.RandomForestGraphs`.
config: `RunConfig` object to configure the runtime settings.
weights_name: A string defining feature column name representing
weights. Will be multiplied by the loss of the example. Used to
downweight or boost examples during training.
keys_name: A string naming one of the features to strip out and
pass through into the inference/eval results dict. Useful for
associating specific examples with their prediction.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
early_stopping_rounds: Allows training to terminate early if the forest is
no longer growing. 100 by default. Set to a Falsy value to disable
the default training hook.
num_trainers: Number of training jobs, which will partition trees
among them.
trainer_id: Which trainer this instance is.
report_feature_importances: If True, print out feature importances
during evaluation.
local_eval: If True, don't use a device assigner for eval. This is to
support some common setups where eval is done on a single machine, even
though training might be distributed.
Returns:
      A `MultiForestMultiHeadEstimator` instance.
"""
model_fns = []
for i in range(len(params_list)):
params = params_list[i].fill()
model_fns.append(
get_model_fn(
params,
graph_builder_class,
device_assigner,
model_head=get_default_head(
params, weights_name, name='head{0}'.format(i)),
weights_name=weights_name,
keys_name=keys_name,
early_stopping_rounds=early_stopping_rounds,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval,
head_scope='output{0}'.format(i)))
super(MultiForestMultiHeadEstimator, self).__init__(
model_fn=get_combined_model_fn(model_fns),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
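# Usage sketch (shapes and values are assumptions): train two forests on a
# two-column label tensor, one head per column.
#
#   params_a = tensor_forest.ForestHParams(num_classes=3, num_features=40)
#   params_b = tensor_forest.ForestHParams(num_classes=2, num_features=40)
#   est = MultiForestMultiHeadEstimator([params_a, params_b],
#                                       model_dir='/tmp/multi_forest')
#   est.fit(input_fn=input_fn_train)  # labels shaped [batch, 2]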
|
xiandiancloud/edx-platform-Y
|
refs/heads/master
|
common/djangoapps/student/management/commands/__init__.py
|
12133432
| |
incuna/incuna-test-utils
|
refs/heads/master
|
incuna_test_utils/management/__init__.py
|
12133432
| |
krummler/zxing-ios
|
refs/heads/master
|
cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/mwcc.py
|
34
|
"""SCons.Tool.mwcc
Tool-specific initialization for the Metrowerks CodeWarrior compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwcc.py 5023 2010/06/14 22:05:46 scons"
import os
import os.path
import SCons.Util
def set_vars(env):
"""Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars
MWCW_VERSIONS is set to a list of objects representing installed versions
MWCW_VERSION is set to the version object that will be used for building.
MWCW_VERSION can be set to a string during Environment
construction to influence which version is chosen, otherwise
the latest one from MWCW_VERSIONS is used.
Returns true if at least one version is found, false otherwise
"""
desired = env.get('MWCW_VERSION', '')
# return right away if the variables are already set
if isinstance(desired, MWVersion):
return 1
elif desired is None:
return 0
versions = find_versions()
version = None
if desired:
for v in versions:
if str(v) == desired:
version = v
elif versions:
version = versions[-1]
env['MWCW_VERSIONS'] = versions
env['MWCW_VERSION'] = version
if version is None:
return 0
env.PrependENVPath('PATH', version.clpath)
env.PrependENVPath('PATH', version.dllpath)
ENV = env['ENV']
ENV['CWFolder'] = version.path
ENV['LM_LICENSE_FILE'] = version.license
plus = lambda x: '+%s' % x
ENV['MWCIncludes'] = os.pathsep.join(map(plus, version.includes))
ENV['MWLibraries'] = os.pathsep.join(map(plus, version.libs))
return 1
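# Sketch (assuming a registered CodeWarrior install): pin the compiler version
# at Environment construction instead of taking the latest discovered one:
#
#   env = Environment(tools=['mwcc'], MWCW_VERSION='5.9')
#
# set_vars() compares the string against str(v) for each MWVersion found and
# returns 0 (tool unavailable) if no installed version matches.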
def find_versions():
"""Return a list of MWVersion objects representing installed versions"""
versions = []
### This function finds CodeWarrior by reading from the registry on
### Windows. Some other method needs to be implemented for other
### platforms, maybe something that calls env.WhereIs('mwcc')
if SCons.Util.can_read_reg:
try:
HLM = SCons.Util.HKEY_LOCAL_MACHINE
product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions'
product_key = SCons.Util.RegOpenKeyEx(HLM, product)
i = 0
while True:
name = product + '\\' + SCons.Util.RegEnumKey(product_key, i)
name_key = SCons.Util.RegOpenKeyEx(HLM, name)
try:
version = SCons.Util.RegQueryValueEx(name_key, 'VERSION')
path = SCons.Util.RegQueryValueEx(name_key, 'PATH')
mwv = MWVersion(version[0], path[0], 'Win32-X86')
versions.append(mwv)
except SCons.Util.RegError:
pass
i = i + 1
except SCons.Util.RegError:
pass
return versions
class MWVersion(object):
def __init__(self, version, path, platform):
self.version = version
self.path = path
self.platform = platform
self.clpath = os.path.join(path, 'Other Metrowerks Tools',
'Command Line Tools')
self.dllpath = os.path.join(path, 'Bin')
# The Metrowerks tools don't store any configuration data so they
# are totally dumb when it comes to locating standard headers,
# libraries, and other files, expecting all the information
# to be handed to them in environment variables. The members set
# below control what information scons injects into the environment
### The paths below give a normal build environment in CodeWarrior for
### Windows, other versions of CodeWarrior might need different paths.
msl = os.path.join(path, 'MSL')
support = os.path.join(path, '%s Support' % platform)
self.license = os.path.join(path, 'license.dat')
self.includes = [msl, support]
self.libs = [msl, support]
def __str__(self):
return self.version
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def generate(env):
"""Add Builders and construction variables for the mwcc to an Environment."""
import SCons.Defaults
import SCons.Tool
set_vars(env)
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'
env['CC'] = 'mwcc'
env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'
env['CXX'] = 'mwcc'
env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'
env['SHCC'] = '$CC'
env['SHCCFLAGS'] = '$CCFLAGS'
env['SHCFLAGS'] = '$CFLAGS'
env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = '$CXXFLAGS'
env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'
env['CFILESUFFIX'] = '.c'
env['CXXFILESUFFIX'] = '.cpp'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
#env['PCH'] = ?
#env['PCHSTOP'] = ?
def exists(env):
return set_vars(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
flochaz/horizon
|
refs/heads/stable/juno
|
openstack_dashboard/dashboards/vledashboard/dashboard.py
|
1
|
from django.utils.translation import ugettext_lazy as _
import horizon
import openstack_dashboard.dashboards.vledashboard.stacks
class Vledashboard(horizon.Dashboard):
slug = "vledashboard"
name = _("Vledashboard")
    panels = ('stacks',)  # Add your panels here.
default_panel = 'stacks' # Specify the slug of the dashboard's default panel.
horizon.register(Vledashboard)
|
google-code-export/tvstreamrecord
|
refs/heads/master
|
cherrypy/lib/auth_basic.py
|
88
|
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
__doc__ = """This module provides a CherryPy 3.x tool which implements
the server-side of HTTP Basic Access Authentication, as described in :rfc:`2617`.
Example usage, using the built-in checkpassword_dict function which uses a dict
as the credentials store::
userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'}
checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict)
basic_auth = {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'earth',
'tools.auth_basic.checkpassword': checkpassword,
}
app_config = { '/' : basic_auth }
"""
__author__ = 'visteya'
__date__ = 'April 2009'
import binascii
from cherrypy._cpcompat import base64_decode
import cherrypy
def checkpassword_dict(user_password_dict):
"""Returns a checkpassword function which checks credentials
against a dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, use
checkpassword_dict(my_credentials_dict) as the value for the
checkpassword argument to basic_auth().
"""
def checkpassword(realm, user, password):
p = user_password_dict.get(user)
return p and p == password or False
return checkpassword
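# A sketch of a custom checkpassword that compares against stored digests
# instead of plaintext (the dict and hash scheme are illustrative only):
#
#   import hashlib
#   def checkpassword_hashed(realm, username, password):
#       stored = hashed_userpassdict.get(username)  # hypothetical store
#       return stored == hashlib.sha256(password.encode('utf-8')).hexdigest()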
def basic_auth(realm, checkpassword, debug=False):
"""A CherryPy tool which hooks at before_handler to perform
HTTP Basic Access Authentication, as specified in :rfc:`2617`.
If the request has an 'authorization' header with a 'Basic' scheme, this
tool attempts to authenticate the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not 'Basic', or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Basic header.
realm
A string containing the authentication realm.
checkpassword
A callable which checks the authentication credentials.
Its signature is checkpassword(realm, username, password). where
username and password are the values obtained from the request's
'authorization' header. If authentication succeeds, checkpassword
returns True, else it returns False.
"""
if '"' in realm:
raise ValueError('Realm cannot contain the " (quote) character.')
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
if auth_header is not None:
try:
scheme, params = auth_header.split(' ', 1)
if scheme.lower() == 'basic':
username, password = base64_decode(params).split(':', 1)
if checkpassword(realm, username, password):
if debug:
cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
request.login = username
return # successful authentication
except (ValueError, binascii.Error): # split() error, base64.decodestring() error
raise cherrypy.HTTPError(400, 'Bad Request')
# Respond with 401 status and a WWW-Authenticate header
cherrypy.serving.response.headers['www-authenticate'] = 'Basic realm="%s"' % realm
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
|
getpelican/pelican-plugins
|
refs/heads/master
|
share_post/__init__.py
|
8
|
from .share_post import * # noqa
|
ayeminoo/futuresonic
|
refs/heads/master
|
futuresonic-xbmc/plugin.audio.futuresonic/default.py
|
4
|
'''
FutureSonic XBMC Plugin
Copyright (C) 2013 t0mm0, Madevil
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.FutureSonic import Addon, FutureSonic
import sys
import xbmc
import xbmcgui
Addon.plugin_url = sys.argv[0]
Addon.plugin_handle = int(sys.argv[1])
Addon.plugin_queries = Addon.parse_query(sys.argv[2][1:])
futuresonic = FutureSonic.FutureSonic(Addon.get_setting('server'),
Addon.get_setting('user'),
Addon.get_setting('password'))
xbmc.log('plugin queries: ' + str(Addon.plugin_queries))
xbmc.log('plugin handle: ' + str(Addon.plugin_handle))
if futuresonic.ping():
if Addon.plugin_queries['mode'] == 'list_indexes':
futuresonic.get_indexes(Addon.plugin_queries['folder_id'])
elif Addon.plugin_queries['mode'] == 'list_playlists':
futuresonic.get_playlists()
elif Addon.plugin_queries['mode'] == 'playlist':
futuresonic.get_playlist(Addon.plugin_queries['playlist_id'])
elif Addon.plugin_queries['mode'] == 'get_music_directory':
futuresonic.get_music_directory(Addon.plugin_queries['id'])
elif Addon.plugin_queries['mode'] == 'index':
futuresonic.get_music_folders()
elif Addon.plugin_queries['mode'] == 'starred':
page = Addon.plugin_queries.get('page', 0)
sort = Addon.plugin_queries.get('sort', '')
if sort:
futuresonic.get_album_list(sort, page)
else:
Addon.add_directory({'mode': 'albums', 'sort': 'starredArtist'}, Addon.get_string(30039))
Addon.add_directory({'mode': 'albums', 'sort': 'starred'}, Addon.get_string(30031))
Addon.add_directory({'mode': 'starredsongs'}, Addon.get_string(30043))
Addon.end_of_directory()
elif Addon.plugin_queries['mode'] == 'starredsongs':
futuresonic.get_starred()
elif Addon.plugin_queries['mode'] == 'lastplayedsongs':
futuresonic.get_lastplayed()
elif Addon.plugin_queries['mode'] == 'newaddedsongs':
futuresonic.get_newadded()
elif Addon.plugin_queries['mode'] == 'play':
futuresonic.play(Addon.plugin_queries['id'])
elif Addon.plugin_queries['mode'] == 'albums':
page = Addon.plugin_queries.get('page', 0)
sort = Addon.plugin_queries.get('sort', '')
if sort:
futuresonic.get_album_list(sort, page)
else:
Addon.add_directory({'mode': 'albums', 'sort': 'hot'}, Addon.get_string(30038))
Addon.add_directory({'mode': 'albums', 'sort': 'tip'}, Addon.get_string(30042))
Addon.add_directory({'mode': 'albums', 'sort': 'newest'}, Addon.get_string(30033))
Addon.add_directory({'mode': 'albums', 'sort': 'highest'}, Addon.get_string(30034))
Addon.add_directory({'mode': 'albums', 'sort': 'frequent'},Addon.get_string(30035))
Addon.add_directory({'mode': 'albums', 'sort': 'recent'}, Addon.get_string(30036))
Addon.add_directory({'mode': 'albums', 'sort': 'random'}, Addon.get_string(30032))
Addon.end_of_directory()
elif Addon.plugin_queries['mode'] == 'random':
random_mode = Addon.plugin_queries.get('random_mode', False)
if random_mode:
queries = {}
if Addon.plugin_queries.get('from_year'):
queries['fromYear'] = Addon.plugin_queries.get('from_year')
if Addon.plugin_queries.get('to_year'):
queries['toYear'] = Addon.plugin_queries.get('to_year')
if Addon.plugin_queries.get('genre'):
queries['genre'] = Addon.plugin_queries.get('genre')
dialog = xbmcgui.Dialog()
if random_mode == 'custom':
rnd = dialog.select(Addon.get_string(30013),
[Addon.get_string(30014),
Addon.get_string(30015),
Addon.get_string(30016),
Addon.get_string(30017)])
if rnd == 1 or rnd == 3:
queries['fromYear'] = dialog.numeric(0,
Addon.get_string(30018),
'2000')
queries['toYear'] = dialog.numeric(0,
Addon.get_string(30019),
queries['fromYear'])
if rnd >= 2:
queries['genre'] = Addon.get_input(Addon.get_string(30020))
queries['size'] = dialog.numeric(0, Addon.get_string(30021), '10')
futuresonic.get_random(queries)
else:
Addon.add_directory({'mode': 'random', 'random_mode': 'custom'}, Addon.get_string(30022))
Addon.add_directory({'mode': 'random', 'random_mode': 'preset', 'from_year': 1950, 'to_year': 1959}, Addon.get_string(30023))
Addon.add_directory({'mode': 'random', 'random_mode': 'preset', 'from_year': 1960, 'to_year': 1969}, Addon.get_string(30024))
Addon.add_directory({'mode': 'random', 'random_mode': 'preset', 'from_year': 1970, 'to_year': 1979}, Addon.get_string(30025))
Addon.add_directory({'mode': 'random', 'random_mode': 'preset', 'from_year': 1980, 'to_year': 1989}, Addon.get_string(30026))
Addon.add_directory({'mode': 'random', 'random_mode': 'preset', 'from_year': 1990, 'to_year': 1999}, Addon.get_string(30027))
Addon.add_directory({'mode': 'random', 'random_mode': 'preset', 'from_year': 2000, 'to_year': 2009}, Addon.get_string(30028))
Addon.add_directory({'mode': 'random', 'random_mode': 'preset', 'from_year': 2010, 'to_year': 2019}, Addon.get_string(30029))
Addon.end_of_directory()
elif Addon.plugin_queries['mode'] == 'search':
search_mode = Addon.plugin_queries.get('search_mode', '')
if search_mode:
q = Addon.plugin_queries.get('q', '')
if not q:
q = Addon.get_input(Addon.get_string({'artist': 30007,
'album': 30008,
'song': 30009}[search_mode]))
if q:
futuresonic.search(search_mode, q)
else:
Addon.add_directory({'mode': 'search', 'search_mode': 'artist'}, Addon.get_string(30007))
Addon.add_directory({'mode': 'search', 'search_mode': 'album'}, Addon.get_string(30008))
Addon.add_directory({'mode': 'search', 'search_mode': 'song'}, Addon.get_string(30009))
Addon.end_of_directory()
else:
futuresonic.get_index()
else:
Addon.show_settings()
|
HybridF5/nova
|
refs/heads/master
|
nova/api/openstack/compute/ips.py
|
26
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
import nova
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as views_addresses
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.i18n import _
ALIAS = 'ips'
authorize = extensions.os_compute_authorizer(ALIAS)
class IPsController(wsgi.Controller):
"""The servers addresses API controller for the OpenStack API."""
# Note(gmann): here using V2 view builder instead of V3 to have V2.1
# server ips response same as V2 which does not include "OS-EXT-IPS:type"
# & "OS-EXT-IPS-MAC:mac_addr". If needed those can be added with
# microversion by using V2.1 view builder.
_view_builder_class = views_addresses.ViewBuilder
def __init__(self, **kwargs):
super(IPsController, self).__init__(**kwargs)
self._compute_api = nova.compute.API(skip_policy_check=True)
@extensions.expected_errors(404)
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context, action='index')
instance = common.get_instance(self._compute_api, context, server_id)
networks = common.get_networks_for_instance(context, instance)
return self._view_builder.index(networks)
@extensions.expected_errors(404)
def show(self, req, server_id, id):
context = req.environ["nova.context"]
authorize(context, action='show')
instance = common.get_instance(self._compute_api, context, server_id)
networks = common.get_networks_for_instance(context, instance)
if id not in networks:
msg = _("Instance is not a member of specified network")
raise exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(networks[id], id)
class IPs(extensions.V21APIExtensionBase):
"""Server addresses."""
name = "Ips"
alias = ALIAS
version = 1
def get_resources(self):
parent = {'member_name': 'server',
'collection_name': 'servers'}
resources = [
extensions.ResourceExtension(
ALIAS, IPsController(), parent=parent, member_name='ip')]
return resources
def get_controller_extensions(self):
return []
|
xfournet/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/namespacePackageImport/p1/m1.py
|
819
|
def foo():
pass
|
cytoscape-ci/service-sample-python
|
refs/heads/master
|
app.py
|
1
|
from flask import Flask
from flask_restful import Api
from registerer import ServiceRegisterer
from services.service_id_mapper import IdMapperService
from services.service_status import ServiceStatus
PORT_NUMBER = 3333
app = Flask(__name__)
api = Api(app)
# Routing
api.add_resource(ServiceStatus, '/') # Simply returns version of API
api.add_resource(IdMapperService, '/map') # ID Mapping service
if __name__ == '__main__':
"""
Sample REST API Server registering itself to CytoAgent.
"""
# Register this API server as a service
ServiceRegisterer.register("idmapping", port=PORT_NUMBER, capacity=4)
# Start the App
app.run(debug=True, host='0.0.0.0', port=PORT_NUMBER, use_reloader=False)
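# Quick check (host is illustrative): with the server running,
#   GET http://localhost:3333/    -> ServiceStatus (API version)
#   GET http://localhost:3333/map -> IdMapperService (ID mapping)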
|
macioosch/dynamo-hard-spheres-sim
|
refs/heads/master
|
create_datafiles.py
|
1
|
#!/usr/bin/env python2
from numpy import linspace
from os import system
for i in linspace(0.1, 1.4, 27):
system("dynamod -m 0 -C 7 -d {0} --i1 0 -r 1 -o config.start.{0:.5f}.xml".format(i))
|
purism/pdak
|
refs/heads/master
|
tests/test_multiprocessing.py
|
1
|
#!/usr/bin/python
from base_test import DakTestCase
from daklib.dakmultiprocessing import DakProcessPool, \
PROC_STATUS_SUCCESS, PROC_STATUS_MISCFAILURE, \
PROC_STATUS_EXCEPTION, PROC_STATUS_SIGNALRAISED
import signal
def async_function(num, num2):
from os import kill, getpid
if num == 1:
sigs = [signal.SIGTERM, signal.SIGPIPE, signal.SIGALRM, signal.SIGHUP]
kill(getpid(), sigs[num2])
if num2 == 3:
raise Exception('Test uncaught exception handling')
if num == 0 and num2 == 1:
return (PROC_STATUS_MISCFAILURE, 'Test custom error return')
return (PROC_STATUS_SUCCESS, 'blah, %d, %d' % (num, num2))
class DakProcessPoolTestCase(DakTestCase):
def testPool(self):
def alarm_handler(signum, frame):
raise AssertionError('Timed out')
# Shouldn't take us more than 15 seconds to run this test
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(15)
p = DakProcessPool()
for s in range(3):
for j in range(4):
p.apply_async(async_function, [s, j])
p.close()
p.join()
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
expected = [(PROC_STATUS_SUCCESS, 'blah, 0, 0'),
(PROC_STATUS_MISCFAILURE, 'Test custom error return'),
(PROC_STATUS_SUCCESS, 'blah, 0, 2'),
(PROC_STATUS_EXCEPTION, 'Test uncaught exception handling'),
(PROC_STATUS_SIGNALRAISED, 15),
(PROC_STATUS_SIGNALRAISED, 13),
(PROC_STATUS_SIGNALRAISED, 14),
(PROC_STATUS_SIGNALRAISED, 1),
(PROC_STATUS_SUCCESS, 'blah, 2, 0'),
(PROC_STATUS_SUCCESS, 'blah, 2, 1'),
(PROC_STATUS_SUCCESS, 'blah, 2, 2'),
(PROC_STATUS_EXCEPTION, 'Test uncaught exception handling')]
        self.assertEqual(len(p.results), len(expected))
for r in range(len(p.results)):
self.assertEqual(p.results[r], expected[r])
|
pedrito2/moduleSP
|
refs/heads/master
|
avancement_possible.py
|
1
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Identify all possible promotions at a given date.
Usage: avancement.py [options] [SOURCE]
Arguments:
    SOURCE               .csv file to process. If none is supplied, use google drive.
Options:
    -h, --help           Show this help
    --cis <cis>          Only search for this CIS [default: All]
    --cie <cie>          Only search for this company [default: All]
    -d, --date <date>    Project to the given <date> (format dd/mm[/[yy]yy]) [default: prochain_ccdspv]
    -o, --output <file>  Save the resulting csv to <file> [default: stdout]
"""
from docopt import docopt
import gestionSP
import sys
arguments = docopt(__doc__)
print(arguments)
if arguments['SOURCE'] is None:
raise NotImplementedError("Recherche depuis google pas encore implémentée")
if arguments['--date'] == 'prochain_ccdspv':
print("Attention, la date prise en compte est aujourd'hui")
else:
raise NotImplementedError("Pas encore de prise en charge des dates")
if arguments['--output'] == 'stdout':
fichier_out = sys.stdout
else:
fichier_out = arguments['--output']
with gestionSP.bddRHfromCSV(arguments['SOURCE']) as SPs:
for sp in SPs:
if arguments['--cie'] in ('All', sp.cie) and arguments['--cis'] in ('All', sp.cis):
print(sp.cie, sp.cis, sp.nom, sp.prenom, sp.grade, sep=', ', end="\n", file=fichier_out)
# Output: Cie CIS Last_name First_name Current_rank Promotable(rank/title)
|
mjtamlyn/django
|
refs/heads/master
|
django/contrib/sessions/base_session.py
|
130
|
"""
This module allows importing AbstractBaseSession even
when django.contrib.sessions is not in INSTALLED_APPS.
"""
from django.db import models
from django.utils.translation import gettext_lazy as _
class BaseSessionManager(models.Manager):
def encode(self, session_dict):
"""
Return the given session dictionary serialized and encoded as a string.
"""
session_store_class = self.model.get_session_store_class()
return session_store_class().encode(session_dict)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
class AbstractBaseSession(models.Model):
session_key = models.CharField(_('session key'), max_length=40, primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'), db_index=True)
objects = BaseSessionManager()
class Meta:
abstract = True
verbose_name = _('session')
verbose_name_plural = _('sessions')
def __str__(self):
return self.session_key
@classmethod
def get_session_store_class(cls):
raise NotImplementedError
def get_decoded(self):
session_store_class = self.get_session_store_class()
return session_store_class().decode(self.session_data)
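# A minimal sketch of a concrete session model on top of this base (module and
# class names here are illustrative, not Django's own):
#
#   class CustomSession(AbstractBaseSession):
#       @classmethod
#       def get_session_store_class(cls):
#           from mysessions.backends.db import SessionStore  # hypothetical
#           return SessionStore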
|
gangadharkadam/stfrappe
|
refs/heads/develop
|
frappe/core/doctype/idle_user/idle_user.py
|
4
|
from __future__ import unicode_literals
import frappe
def execute(filters=None):
    columns = get_columns()
    data = []
    dbname = frappe.db.sql("""select site_name from `tabSubAdmin Info` """, as_dict=1)
    lst = []
    qry_srt = 'select name,last_login from('
    for key in dbname:
        temp1 = key['site_name']
        temp = key['site_name']
        # The database name is derived from the site name: keep only the part
        # before the first dot, truncated to 16 characters.
        if temp.find('.') != -1:
            temp = temp.split('.')[0][:16]
        else:
            temp = temp[:16]
        qry = "SELECT name,last_login,'%s' as site_name FROM " % (temp1)
        if temp:
            qry += temp + '.tabUser where name not in ("Guest","Administrator")'
            lst.append(qry)
    fin_qry = ' UNION '.join(lst)
    # A wrapped query with ordering and a LIMIT is assembled here, but the
    # plain UNION query is the one actually executed below.
    qry = qry_srt + fin_qry + " where doc_name='Administrator')foo ORDER BY creation DESC limit 5"
    act_details = frappe.db.sql(fin_qry, as_list=1, debug=1)
    data = act_details
    return columns, data
def get_columns():
return [
"User Name:Data:220",
"Last Logged in on:Date:220",
"Site Name:Data:320"
]
|
ajohnson23/depot_tools
|
refs/heads/master
|
third_party/boto/datapipeline/__init__.py
|
12133432
| |
Tejal011089/Medsyn2_app
|
refs/heads/master
|
stock/report/item_wise_price_list_rate/__init__.py
|
12133432
| |
olemis/sqlalchemy
|
refs/heads/master
|
test/orm/test_of_type.py
|
25
|
from sqlalchemy.orm import Session, aliased, with_polymorphic, \
contains_eager, joinedload, subqueryload, relationship,\
subqueryload_all, joinedload_all
from sqlalchemy import and_
from sqlalchemy import testing, exc as sa_exc
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import assert_raises, assert_raises_message, eq_
from sqlalchemy.testing.schema import Column
from sqlalchemy.engine import default
from sqlalchemy.testing.entities import ComparableEntity
from sqlalchemy import Integer, String, ForeignKey
from .inheritance._poly_fixtures import Company, Person, Engineer, Manager, Boss, \
Machine, Paperwork, _PolymorphicFixtureBase, _Polymorphic,\
_PolymorphicPolymorphic, _PolymorphicUnions, _PolymorphicJoins,\
_PolymorphicAliasedJoins
class _PolymorphicTestBase(object):
__dialect__ = 'default'
def test_any_one(self):
sess = Session()
any_ = Company.employees.of_type(Engineer).any(
Engineer.primary_language == 'cobol')
eq_(sess.query(Company).filter(any_).one(), self.c2)
def test_any_two(self):
sess = Session()
calias = aliased(Company)
any_ = calias.employees.of_type(Engineer).any(
Engineer.primary_language == 'cobol')
eq_(sess.query(calias).filter(any_).one(), self.c2)
def test_any_three(self):
sess = Session()
any_ = Company.employees.of_type(Boss).any(
Boss.golf_swing == 'fore')
eq_(sess.query(Company).filter(any_).one(), self.c1)
def test_any_four(self):
sess = Session()
any_ = Company.employees.of_type(Boss).any(
Manager.manager_name == 'pointy')
eq_(sess.query(Company).filter(any_).one(), self.c1)
def test_any_five(self):
sess = Session()
any_ = Company.employees.of_type(Engineer).any(
and_(Engineer.primary_language == 'cobol'))
eq_(sess.query(Company).filter(any_).one(), self.c2)
def test_join_to_subclass_one(self):
sess = Session()
eq_(sess.query(Company)
.join(Company.employees.of_type(Engineer))
.filter(Engineer.primary_language == 'java').all(),
[self.c1])
def test_join_to_subclass_two(self):
sess = Session()
eq_(sess.query(Company)
.join(Company.employees.of_type(Engineer), 'machines')
.filter(Machine.name.ilike("%thinkpad%")).all(),
[self.c1])
def test_join_to_subclass_three(self):
sess = Session()
eq_(sess.query(Company, Engineer)
.join(Company.employees.of_type(Engineer))
.filter(Engineer.primary_language == 'java').count(),
1)
def test_join_to_subclass_four(self):
sess = Session()
# test [ticket:2093]
eq_(sess.query(Company.company_id, Engineer)
.join(Company.employees.of_type(Engineer))
.filter(Engineer.primary_language == 'java').count(),
1)
def test_join_to_subclass_five(self):
sess = Session()
eq_(sess.query(Company)
.join(Company.employees.of_type(Engineer))
.filter(Engineer.primary_language == 'java').count(),
1)
def test_with_polymorphic_join_compile_one(self):
sess = Session()
self.assert_compile(
sess.query(Company).join(
Company.employees.of_type(
with_polymorphic(Person, [Engineer, Manager],
aliased=True, flat=True)
)
),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name FROM companies "
"JOIN %s"
% (
self._polymorphic_join_target([Engineer, Manager])
)
)
def test_with_polymorphic_join_exec_contains_eager_one(self):
sess = Session()
def go():
wp = with_polymorphic(Person, [Engineer, Manager],
aliased=True, flat=True)
eq_(
sess.query(Company).join(
Company.employees.of_type(wp)
).order_by(Company.company_id, wp.person_id).\
options(contains_eager(Company.employees.of_type(wp))).all(),
[self.c1, self.c2]
)
self.assert_sql_count(testing.db, go, 1)
def test_with_polymorphic_join_exec_contains_eager_two(self):
sess = Session()
def go():
wp = with_polymorphic(Person, [Engineer, Manager], aliased=True)
eq_(
sess.query(Company).join(
Company.employees.of_type(wp)
).order_by(Company.company_id, wp.person_id).\
options(contains_eager(Company.employees, alias=wp)).all(),
[self.c1, self.c2]
)
self.assert_sql_count(testing.db, go, 1)
def test_with_polymorphic_any(self):
sess = Session()
wp = with_polymorphic(Person, [Engineer], aliased=True)
eq_(
sess.query(Company.company_id).\
filter(
Company.employees.of_type(wp).any(
wp.Engineer.primary_language == 'java')
).all(),
[(1, )]
)
def test_subqueryload_implicit_withpoly(self):
sess = Session()
def go():
eq_(
sess.query(Company).\
filter_by(company_id=1).\
options(subqueryload(Company.employees.of_type(Engineer))).\
all(),
[self._company_with_emps_fixture()[0]]
)
self.assert_sql_count(testing.db, go, 4)
def test_joinedload_implicit_withpoly(self):
sess = Session()
def go():
eq_(
sess.query(Company).\
filter_by(company_id=1).\
options(joinedload(Company.employees.of_type(Engineer))).\
all(),
[self._company_with_emps_fixture()[0]]
)
self.assert_sql_count(testing.db, go, 3)
def test_subqueryload_explicit_withpoly(self):
sess = Session()
def go():
target = with_polymorphic(Person, Engineer)
eq_(
sess.query(Company).\
filter_by(company_id=1).\
options(subqueryload(Company.employees.of_type(target))).\
all(),
[self._company_with_emps_fixture()[0]]
)
self.assert_sql_count(testing.db, go, 4)
def test_joinedload_explicit_withpoly(self):
sess = Session()
def go():
target = with_polymorphic(Person, Engineer, flat=True)
eq_(
sess.query(Company).\
filter_by(company_id=1).\
options(joinedload(Company.employees.of_type(target))).\
all(),
[self._company_with_emps_fixture()[0]]
)
self.assert_sql_count(testing.db, go, 3)
def test_joinedload_stacked_of_type(self):
sess = Session()
def go():
eq_(
sess.query(Company).
filter_by(company_id=1).
options(
joinedload(Company.employees.of_type(Manager)),
joinedload(Company.employees.of_type(Engineer))
).all(),
[self._company_with_emps_fixture()[0]]
)
self.assert_sql_count(testing.db, go, 2)
class PolymorphicPolymorphicTest(_PolymorphicTestBase, _PolymorphicPolymorphic):
def _polymorphic_join_target(self, cls):
from sqlalchemy.orm import class_mapper
from sqlalchemy.sql.expression import FromGrouping
m, sel = class_mapper(Person)._with_polymorphic_args(cls)
sel = FromGrouping(sel.alias(flat=True))
comp_sel = sel.compile(dialect=default.DefaultDialect())
return \
comp_sel.process(sel, asfrom=True).replace("\n", "") + \
" ON companies.company_id = people_1.company_id"
class PolymorphicUnionsTest(_PolymorphicTestBase, _PolymorphicUnions):
def _polymorphic_join_target(self, cls):
from sqlalchemy.orm import class_mapper
sel = class_mapper(Person)._with_polymorphic_selectable.element
comp_sel = sel.compile(dialect=default.DefaultDialect())
return \
comp_sel.process(sel, asfrom=True).replace("\n", "") + \
" AS anon_1 ON companies.company_id = anon_1.company_id"
class PolymorphicAliasedJoinsTest(_PolymorphicTestBase, _PolymorphicAliasedJoins):
def _polymorphic_join_target(self, cls):
from sqlalchemy.orm import class_mapper
sel = class_mapper(Person)._with_polymorphic_selectable.element
comp_sel = sel.compile(dialect=default.DefaultDialect())
return \
comp_sel.process(sel, asfrom=True).replace("\n", "") + \
" AS anon_1 ON companies.company_id = anon_1.people_company_id"
class PolymorphicJoinsTest(_PolymorphicTestBase, _PolymorphicJoins):
def _polymorphic_join_target(self, cls):
from sqlalchemy.orm import class_mapper
from sqlalchemy.sql.expression import FromGrouping
sel = FromGrouping(class_mapper(Person)._with_polymorphic_selectable.alias(flat=True))
comp_sel = sel.compile(dialect=default.DefaultDialect())
return \
comp_sel.process(sel, asfrom=True).replace("\n", "") + \
" ON companies.company_id = people_1.company_id"
def test_joinedload_explicit_with_unaliased_poly_compile(self):
sess = Session()
target = with_polymorphic(Person, Engineer)
q = sess.query(Company).\
filter_by(company_id=1).\
options(joinedload(Company.employees.of_type(target)))
assert_raises_message(
sa_exc.InvalidRequestError,
"Detected unaliased columns when generating joined load.",
q._compile_context
)
def test_joinedload_explicit_with_flataliased_poly_compile(self):
sess = Session()
target = with_polymorphic(Person, Engineer, flat=True)
q = sess.query(Company).\
filter_by(company_id=1).\
options(joinedload(Company.employees.of_type(target)))
self.assert_compile(q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name, "
"people_1.person_id AS people_1_person_id, "
"people_1.company_id AS people_1_company_id, "
"people_1.name AS people_1_name, people_1.type AS people_1_type, "
"engineers_1.person_id AS engineers_1_person_id, "
"engineers_1.status AS engineers_1_status, "
"engineers_1.engineer_name AS engineers_1_engineer_name, "
"engineers_1.primary_language AS engineers_1_primary_language "
"FROM companies LEFT OUTER JOIN (people AS people_1 "
"LEFT OUTER JOIN engineers AS engineers_1 "
"ON people_1.person_id = engineers_1.person_id "
"LEFT OUTER JOIN managers AS managers_1 "
"ON people_1.person_id = managers_1.person_id) "
"ON companies.company_id = people_1.company_id "
"WHERE companies.company_id = :company_id_1 "
"ORDER BY people_1.person_id"
)
class SubclassRelationshipTest(testing.AssertsCompiledSQL, fixtures.DeclarativeMappedTest):
"""There's overlap here vs. the ones above."""
run_setup_classes = 'once'
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
__dialect__ = 'default'
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Job(ComparableEntity, Base):
__tablename__ = "job"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
type = Column(String(10))
container_id = Column(Integer, ForeignKey('data_container.id'))
__mapper_args__ = {"polymorphic_on": type}
class SubJob(Job):
__tablename__ = 'subjob'
id = Column(Integer, ForeignKey('job.id'), primary_key=True)
attr = Column(String(10))
__mapper_args__ = {"polymorphic_identity": "sub"}
class ParentThing(ComparableEntity, Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
container_id = Column(Integer, ForeignKey('data_container.id'))
container = relationship("DataContainer")
class DataContainer(ComparableEntity, Base):
__tablename__ = "data_container"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
name = Column(String(10))
jobs = relationship(Job, order_by=Job.id)
@classmethod
def insert_data(cls):
s = Session(testing.db)
s.add_all(cls._fixture())
s.commit()
@classmethod
def _fixture(cls):
ParentThing, DataContainer, SubJob = \
cls.classes.ParentThing,\
cls.classes.DataContainer,\
cls.classes.SubJob
return [
ParentThing(
container=DataContainer(name="d1",
jobs=[
SubJob(attr="s1"),
SubJob(attr="s2")
])
),
ParentThing(
container=DataContainer(name="d2",
jobs=[
SubJob(attr="s3"),
SubJob(attr="s4")
])
),
]
@classmethod
def _dc_fixture(cls):
return [p.container for p in cls._fixture()]
def test_contains_eager_wpoly(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_P = with_polymorphic(Job, SubJob, aliased=True)
s = Session(testing.db)
q = s.query(DataContainer).\
join(DataContainer.jobs.of_type(Job_P)).\
options(contains_eager(DataContainer.jobs.of_type(Job_P)))
def go():
eq_(
q.all(),
self._dc_fixture()
)
self.assert_sql_count(testing.db, go, 1)
def test_joinedload_wpoly(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_P = with_polymorphic(Job, SubJob, aliased=True)
s = Session(testing.db)
q = s.query(DataContainer).\
options(joinedload(DataContainer.jobs.of_type(Job_P)))
def go():
eq_(
q.all(),
self._dc_fixture()
)
self.assert_sql_count(testing.db, go, 1)
def test_joinedload_wsubclass(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
s = Session(testing.db)
q = s.query(DataContainer).\
options(joinedload(DataContainer.jobs.of_type(SubJob)))
def go():
eq_(
q.all(),
self._dc_fixture()
)
self.assert_sql_count(testing.db, go, 1)
def test_lazyload(self):
DataContainer = self.classes.DataContainer
s = Session(testing.db)
q = s.query(DataContainer)
def go():
eq_(
q.all(),
self._dc_fixture()
)
# SELECT data container
# SELECT job * 2 container rows
# SELECT subjob * 4 rows
self.assert_sql_count(testing.db, go, 7)
def test_subquery_wsubclass(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
s = Session(testing.db)
q = s.query(DataContainer).\
options(subqueryload(DataContainer.jobs.of_type(SubJob)))
def go():
eq_(
q.all(),
self._dc_fixture()
)
self.assert_sql_count(testing.db, go, 2)
def test_twolevel_subqueryload_wsubclass(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
s = Session(testing.db)
q = s.query(ParentThing).\
options(
subqueryload_all(
ParentThing.container,
DataContainer.jobs.of_type(SubJob)
))
def go():
eq_(
q.all(),
self._fixture()
)
self.assert_sql_count(testing.db, go, 3)
def test_twolevel_joinedload_wsubclass(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
s = Session(testing.db)
q = s.query(ParentThing).\
options(
joinedload_all(
ParentThing.container,
DataContainer.jobs.of_type(SubJob)
))
def go():
eq_(
q.all(),
self._fixture()
)
self.assert_sql_count(testing.db, go, 1)
def test_any_wpoly(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_P = with_polymorphic(Job, SubJob, aliased=True, flat=True)
s = Session()
q = s.query(Job).join(DataContainer.jobs).\
filter(
DataContainer.jobs.of_type(Job_P).\
any(Job_P.id < Job.id)
)
self.assert_compile(q,
"SELECT job.id AS job_id, job.type AS job_type, "
"job.container_id "
"AS job_container_id "
"FROM data_container "
"JOIN job ON data_container.id = job.container_id "
"WHERE EXISTS (SELECT 1 "
"FROM job AS job_1 LEFT OUTER JOIN subjob AS subjob_1 "
"ON job_1.id = subjob_1.id "
"WHERE data_container.id = job_1.container_id "
"AND job_1.id < job.id)"
)
def test_any_walias(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_A = aliased(Job)
s = Session()
q = s.query(Job).join(DataContainer.jobs).\
filter(
DataContainer.jobs.of_type(Job_A).\
any(and_(Job_A.id < Job.id, Job_A.type=='fred'))
)
self.assert_compile(q,
"SELECT job.id AS job_id, job.type AS job_type, "
"job.container_id AS job_container_id "
"FROM data_container JOIN job ON data_container.id = job.container_id "
"WHERE EXISTS (SELECT 1 "
"FROM job AS job_1 "
"WHERE data_container.id = job_1.container_id "
"AND job_1.id < job.id AND job_1.type = :type_1)"
)
def test_join_wpoly(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_P = with_polymorphic(Job, SubJob)
s = Session()
q = s.query(DataContainer).join(DataContainer.jobs.of_type(Job_P))
self.assert_compile(q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(job LEFT OUTER JOIN subjob "
"ON job.id = subjob.id) "
"ON data_container.id = job.container_id")
def test_join_wsubclass(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
s = Session()
q = s.query(DataContainer).join(DataContainer.jobs.of_type(SubJob))
# note the of_type() here renders JOIN for the Job->SubJob.
# this is because it's using the SubJob mapper directly within
# query.join(). When we do joinedload() etc., we're instead
# doing a with_polymorphic(), and there we need the join to be
# outer by default.
self.assert_compile(q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN (job JOIN subjob ON job.id = subjob.id) "
"ON data_container.id = job.container_id"
)
def test_join_wpoly_innerjoin(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_P = with_polymorphic(Job, SubJob, innerjoin=True)
s = Session()
q = s.query(DataContainer).join(DataContainer.jobs.of_type(Job_P))
self.assert_compile(q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(job JOIN subjob ON job.id = subjob.id) "
"ON data_container.id = job.container_id")
def test_join_walias(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_A = aliased(Job)
s = Session()
q = s.query(DataContainer).join(DataContainer.jobs.of_type(Job_A))
self.assert_compile(q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN job AS job_1 "
"ON data_container.id = job_1.container_id")
def test_join_explicit_wpoly_noalias(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_P = with_polymorphic(Job, SubJob)
s = Session()
q = s.query(DataContainer).join(Job_P, DataContainer.jobs)
self.assert_compile(q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(job LEFT OUTER JOIN subjob "
"ON job.id = subjob.id) "
"ON data_container.id = job.container_id")
def test_join_explicit_wpoly_flat(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_P = with_polymorphic(Job, SubJob, flat=True)
s = Session()
q = s.query(DataContainer).join(Job_P, DataContainer.jobs)
self.assert_compile(q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(job AS job_1 LEFT OUTER JOIN subjob AS subjob_1 "
"ON job_1.id = subjob_1.id) "
"ON data_container.id = job_1.container_id")
def test_join_explicit_wpoly_full_alias(self):
ParentThing, DataContainer, Job, SubJob = \
self.classes.ParentThing,\
self.classes.DataContainer,\
self.classes.Job,\
self.classes.SubJob
Job_P = with_polymorphic(Job, SubJob, aliased=True)
s = Session()
q = s.query(DataContainer).join(Job_P, DataContainer.jobs)
self.assert_compile(q,
"SELECT data_container.id AS data_container_id, "
"data_container.name AS data_container_name "
"FROM data_container JOIN "
"(SELECT job.id AS job_id, job.type AS job_type, "
"job.container_id AS job_container_id, "
"subjob.id AS subjob_id, subjob.attr AS subjob_attr "
"FROM job LEFT OUTER JOIN subjob ON job.id = subjob.id) "
"AS anon_1 ON data_container.id = anon_1.job_container_id"
)
|
achang97/YouTunes
|
refs/heads/master
|
lib/python2.7/site-packages/s3transfer/exceptions.py
|
11
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from concurrent.futures import CancelledError
class RetriesExceededError(Exception):
def __init__(self, last_exception, msg='Max Retries Exceeded'):
super(RetriesExceededError, self).__init__(msg)
self.last_exception = last_exception
class S3UploadFailedError(Exception):
pass
class InvalidSubscriberMethodError(Exception):
pass
class TransferNotDoneError(Exception):
pass
class FatalError(CancelledError):
"""A CancelledError raised from an error in the TransferManager"""
pass
|
osvalr/odoo
|
refs/heads/8.0
|
openerp/netsvc.py
|
220
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import logging.handlers
import os
import platform
import pprint
import release
import sys
import threading
import psycopg2
import openerp
import sql_db
import tools
_logger = logging.getLogger(__name__)
def log(logger, level, prefix, msg, depth=None):
indent=''
indent_after=' '*len(prefix)
for line in (prefix + pprint.pformat(msg, depth=depth)).split('\n'):
logger.log(level, indent+line)
indent=indent_after
def LocalService(name):
"""
The openerp.netsvc.LocalService() function is deprecated. It still works
in two cases: workflows and reports. For workflows, instead of using
LocalService('workflow'), openerp.workflow should be used (better yet,
methods on openerp.osv.orm.Model should be used). For reports,
openerp.report.render_report() should be used (methods on the Model should
be provided too in the future).
"""
assert openerp.conf.deprecation.allow_local_service
_logger.warning("LocalService() is deprecated since march 2013 (it was called with '%s')." % name)
if name == 'workflow':
return openerp.workflow
if name.startswith('report.'):
report = openerp.report.interface.report_int._reports.get(name)
if report:
return report
else:
dbname = getattr(threading.currentThread(), 'dbname', None)
if dbname:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
return registry['ir.actions.report.xml']._lookup_report(cr, name[len('report.'):])
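# Hedged usage sketch (not part of the original module): the replacements
# named in the LocalService() docstring above would be used roughly as
# follows; signatures are assumed from OpenERP 8 and should be verified.
#
#   openerp.workflow.trg_validate(uid, 'res.model', res_id, 'signal', cr)
#   openerp.report.render_report(cr, uid, [res_id], 'report.name', data)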
path_prefix = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
class PostgreSQLHandler(logging.Handler):
""" PostgreSQL Loggin Handler will store logs in the database, by default
the current database, can be set using --log-db=DBNAME
"""
def emit(self, record):
ct = threading.current_thread()
ct_db = getattr(ct, 'dbname', None)
dbname = tools.config['log_db'] if tools.config['log_db'] and tools.config['log_db'] != '%d' else ct_db
if not dbname:
return
with tools.ignore(Exception), tools.mute_logger('openerp.sql_db'), sql_db.db_connect(dbname, allow_uri=True).cursor() as cr:
cr.autocommit(True)
msg = tools.ustr(record.msg)
if record.args:
msg = msg % record.args
traceback = getattr(record, 'exc_text', '')
if traceback:
msg = "%s\n%s" % (msg, traceback)
# we do not use record.levelname because it may have been changed by ColoredFormatter.
levelname = logging.getLevelName(record.levelno)
val = ('server', ct_db, record.name, levelname, msg, record.pathname[len(path_prefix)+1:], record.lineno, record.funcName)
cr.execute("""
INSERT INTO ir_logging(create_date, type, dbname, name, level, message, path, line, func)
VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s)
""", val)
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
#The background is set with 40 plus the number of the color, and the foreground with 30.
#These are the sequences needed to get colored output.
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
LEVEL_COLOR_MAPPING = {
logging.DEBUG: (BLUE, DEFAULT),
logging.INFO: (GREEN, DEFAULT),
logging.WARNING: (YELLOW, DEFAULT),
logging.ERROR: (RED, DEFAULT),
logging.CRITICAL: (WHITE, RED),
}
class DBFormatter(logging.Formatter):
def format(self, record):
record.pid = os.getpid()
record.dbname = getattr(threading.currentThread(), 'dbname', '?')
return logging.Formatter.format(self, record)
class ColoredFormatter(DBFormatter):
def format(self, record):
fg_color, bg_color = LEVEL_COLOR_MAPPING.get(record.levelno, (GREEN, DEFAULT))
record.levelname = COLOR_PATTERN % (30 + fg_color, 40 + bg_color, record.levelname)
return DBFormatter.format(self, record)
_logger_init = False
def init_logger():
global _logger_init
if _logger_init:
return
_logger_init = True
logging.addLevelName(25, "INFO")
from tools.translate import resetlocale
resetlocale()
# create a format for log messages and dates
format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'
if tools.config['syslog']:
# SysLog Handler
if os.name == 'nt':
handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
elif platform.system() == 'Darwin':
handler = logging.handlers.SysLogHandler('/var/run/log')
else:
handler = logging.handlers.SysLogHandler('/dev/log')
format = '%s %s' % (release.description, release.version) \
+ ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'
elif tools.config['logfile']:
# LogFile Handler
logf = tools.config['logfile']
try:
# We check we have the right location for the log files
dirname = os.path.dirname(logf)
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
if tools.config['logrotate'] is not False:
handler = logging.handlers.TimedRotatingFileHandler(filename=logf, when='D', interval=1, backupCount=30)
elif os.name == 'posix':
handler = logging.handlers.WatchedFileHandler(logf)
else:
handler = logging.FileHandler(logf)
except Exception:
sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
handler = logging.StreamHandler(sys.stdout)
else:
# Normal Handler on standard output
handler = logging.StreamHandler(sys.stdout)
# Check that handler.stream has a fileno() method: when running OpenERP
# behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
# which has no fileno() method. (mod_wsgi.Log is what is being bound to
# sys.stderr when the logging.StreamHandler is being constructed above.)
def is_a_tty(stream):
return hasattr(stream, 'fileno') and os.isatty(stream.fileno())
if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
formatter = ColoredFormatter(format)
else:
formatter = DBFormatter(format)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
if tools.config['log_db']:
db_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
postgresqlHandler = PostgreSQLHandler()
postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
logging.getLogger().addHandler(postgresqlHandler)
# Configure loggers levels
pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])
logconfig = tools.config['log_handler']
logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
for logconfig_item in logging_configurations:
loggername, level = logconfig_item.split(':')
level = getattr(logging, level, logging.INFO)
logger = logging.getLogger(loggername)
logger.setLevel(level)
for logconfig_item in logging_configurations:
_logger.debug('logger level set: "%s"', logconfig_item)
DEFAULT_LOG_CONFIGURATION = [
'openerp.workflow.workitem:WARNING',
'openerp.http.rpc.request:INFO',
'openerp.http.rpc.response:INFO',
'openerp.addons.web.http:INFO',
'openerp.sql_db:INFO',
':INFO',
]
PSEUDOCONFIG_MAPPER = {
'debug_rpc_answer': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG', 'openerp.http.rpc.response:DEBUG'],
'debug_rpc': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG'],
'debug': ['openerp:DEBUG'],
'debug_sql': ['openerp.sql_db:DEBUG'],
'info': [],
'warn': ['openerp:WARNING', 'werkzeug:WARNING'],
'error': ['openerp:ERROR', 'werkzeug:ERROR'],
'critical': ['openerp:CRITICAL', 'werkzeug:CRITICAL'],
}
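# Example: entries both here and in tools.config['log_handler'] use the
# '<logger>:<LEVEL>' form (e.g. 'openerp.sql_db:DEBUG'); init_logger() splits
# each entry on ':' and applies logging.getLogger(name).setLevel(level).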
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
CenterForTheBuiltEnvironment/mave
|
refs/heads/master
|
mave/trainers.py
|
1
|
import numpy as np
import pdb
import estimators
from scipy.stats import randint as sp_randint
from sklearn import cross_validation, svm, grid_search, \
ensemble, neighbors, dummy
class ModelTrainer(object):
def __init__(self,
search_iterations=20,
n_jobs=-1,
k=10,
                 verbose=False,
**kwargs):
self.search_iterations = search_iterations
self.n_jobs = n_jobs
self.k = k
self.verbose = verbose
def train(self, dataset, randomized_search=True):
# using a random grid search assessed using k-fold cross validation
if randomized_search:
self.model = grid_search.RandomizedSearchCV(
self.model,
param_distributions=self.params,
n_iter=self.search_iterations,
n_jobs=self.n_jobs,
cv=self.k,
verbose=self.verbose)
# otherwise do an exhaustive grid search
else:
self.model = grid_search.GridSearchCV(
self.model,
param_grid=self.params,
n_jobs=self.n_jobs,
cv=self.k,
verbose=self.verbose)
self.model.fit(dataset.X_s, dataset.y_s)
return self.model
class DummyTrainer(ModelTrainer):
params = {"strategy": ['mean', 'median']}
model = dummy.DummyRegressor()
def __init__(self, **kwargs):
super(DummyTrainer, self).__init__(**kwargs)
class HourWeekdayBinModelTrainer(ModelTrainer):
params = {"strategy": ['mean', 'median']}
model = estimators.HourWeekdayBinModel()
def __init__(self, **kwargs):
super(HourWeekdayBinModelTrainer, self).__init__(**kwargs)
class KNeighborsTrainer(ModelTrainer):
params = {
"p": [1,2],
"n_neighbors": sp_randint(6, 40),
"leaf_size": np.logspace(1, 2.5, 1000)
}
model = neighbors.KNeighborsRegressor()
def __init__(self, **kwargs):
super(KNeighborsTrainer, self).__init__(**kwargs)
class SVRTrainer(ModelTrainer):
params = {
"C": np.logspace(-3, 1, 1000),
"epsilon": np.logspace(-3, 0.5, 1000),
"degree": [2,3,4],
"gamma": np.logspace(-3, 2, 1000),
"max_iter": [20000]
}
model = svm.SVR()
def __init__(self, **kwargs):
super(SVRTrainer, self).__init__(**kwargs)
class RandomForestTrainer(ModelTrainer):
#TODO: check the validity of max_features=4
max_features = 4
params = {
"max_depth": [4, 5, 6, 7, 8, 9, 10, None],
"max_features": sp_randint(3, max_features),
"min_samples_split": sp_randint(5, 500),
"min_samples_leaf": sp_randint(5, 500),
"bootstrap": [True, False]
}
model = ensemble.RandomForestRegressor()
def __init__(self, **kwargs):
super(RandomForestTrainer, self).__init__(**kwargs)
class GradientBoostingTrainer(ModelTrainer):
max_features = 4
params = {
"max_depth": [4, 5, 6, 7, 8, 9, 10, None],
"n_estimators": np.logspace(1.5, 4, 1000).astype(int),
"max_features": sp_randint(3, max_features),
"min_samples_split": sp_randint(5, 50),
"min_samples_leaf": sp_randint(5, 50),
"subsample": [0.8, 1.0],
"learning_rate": [0.05, 0.1, 0.2, 0.5]
}
model = ensemble.GradientBoostingRegressor()
def __init__(self, **kwargs):
super(GradientBoostingTrainer, self).__init__(**kwargs)
class ExtraTreesTrainer(ModelTrainer):
max_features = 4
params = {
"max_depth": [4, 5, 6, 7, 8, 9, 10, None],
"n_estimators": sp_randint(5, 50),
"max_features": sp_randint(3, max_features),
"min_samples_split": sp_randint(5, 50),
"min_samples_leaf": sp_randint(5, 50),
"bootstrap": [True, False]
}
model = ensemble.ExtraTreesRegressor()
def __init__(self, **kwargs):
super(ExtraTreesTrainer, self).__init__(**kwargs)
if __name__=='__main__':
t = DummyTrainer()
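    # Hedged usage sketch (not in the original file): train() expects a
    # dataset object exposing scaled arrays X_s and y_s, as consumed by
    # ModelTrainer.train() above.
    #
    #   best = t.train(dataset)                          # randomized search
    #   exhaustive = t.train(dataset, randomized_search=False)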
|
JdeRobot/JdeRobot
|
refs/heads/master
|
src/drivers/drone/__init__.py
|
5
|
from camera import *
from cmdvel import *
from extra import *
from pose3d import *
import time
import math
import rospy
import mavros
import threading
from mavros import setpoint as SP
class Drone(threading.Thread):
def __init__(self, topicArming, topicLand, topicTakeOff,topicSetMode, topicVel, topicPose, topicCameraVentral, topicCameraFrontal):
self.__cameraVentral = ListenerCamera(topicCameraVentral)
self.__cameraFrontal = ListenerCamera(topicCameraFrontal)
self.__extra = PublisherExtra(topicArming, topicLand, topicSetMode, topicTakeOff)
self.__cmdvel = PublisherCMDVel(topicVel)
self.__pose3d = ListenerPose3d(topicPose)
rospy.Rate(20)
mavros.set_namespace()
self.stop_event = threading.Event()
self.lock = threading.Lock()
threading.Thread.__init__(self, args=self.stop_event)
def getImageVentral(self):
return self.__cameraVentral.getImage()
def getImageFrontal(self):
return self.__cameraFrontal.getImage()
def takeoff(self):
self.__extra.get_coordinates()
self.__extra.arming()
self.__extra.takeoff()
self.__extra.change_mode()
def get_coordinates(self):
self.__extra.get_coordinates()
def land(self):
self.__extra.land()
def toggleCam(self):
self.__extra.toggleCam()
def reset(self):
self.__extra.reset()
def record(self, record):
self.__extra.record(record)
    def sendCMDVelocities(self, vx, vy, vz, yaw_rate):
pz = 2 - self.__pose3d.getPose3d().z + vz
self.__cmdvel.sendCMDVel(0,0,pz,-vx,vy,vz,0,0,0,0,yaw_rate)
def sendVelocities(self):
self.__cmdvel.sendVelocities()
def setVX(self, vx):
self.__cmdvel.setVX(vx)
def setVY(self, vy):
self.__cmdvel.setVY(vy)
def setVZ(self,vz):
self.__cmdvel.setVZ(vz)
def setAngularZ(self, az):
self.__cmdvel.setAngularZ(az)
def setAngularX(self,ax):
self.__cmdvel.setAngularX(ax)
def setAngularY(self,ay):
self.__cmdvel.setAngularY(ay)
def setYaw(self,yaw):
self.setAngularZ(yaw)
def setRoll(self,roll):
self.setAngularX(roll)
def setPitch(self,pitch):
self.setAngularY(pitch)
def getPose3d(self):
return self.__pose3d.getPose3d()
def stop(self):
self.__pose3d.stop()
self.__cameraVentral.stop()
self.__cameraFrontal.stop()
    def pause(self):
        self.__cmdvel.pause()
    def resume(self):
        self.__cmdvel.resume()
|
tlksio/tlksio
|
refs/heads/develop
|
env/lib/python3.4/site-packages/whoosh/util/testing.py
|
95
|
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import os.path
import random
import shutil
import sys
import tempfile
from contextlib import contextmanager
from whoosh.filedb.filestore import FileStorage
from whoosh.util import now, random_name
class TempDir(object):
def __init__(self, basename="", parentdir=None, ext=".whoosh",
suppress=frozenset(), keepdir=False):
self.basename = basename or random_name(8)
self.parentdir = parentdir
dirname = parentdir or tempfile.mkdtemp(ext, self.basename)
self.dir = os.path.abspath(dirname)
self.suppress = suppress
self.keepdir = keepdir
def __enter__(self):
if not os.path.exists(self.dir):
os.makedirs(self.dir)
return self.dir
def cleanup(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
if not self.keepdir:
try:
shutil.rmtree(self.dir)
except OSError:
e = sys.exc_info()[1]
#sys.stderr.write("Can't remove temp dir: " + str(e) + "\n")
#if exc_type is None:
# raise
if exc_type is not None:
if self.keepdir:
sys.stderr.write("Temp dir=" + self.dir + "\n")
if exc_type not in self.suppress:
return False
class TempStorage(TempDir):
def __init__(self, debug=False, **kwargs):
TempDir.__init__(self, **kwargs)
self._debug = debug
def cleanup(self):
self.store.close()
def __enter__(self):
dirpath = TempDir.__enter__(self)
self.store = FileStorage(dirpath, debug=self._debug)
return self.store
class TempIndex(TempStorage):
def __init__(self, schema, ixname='', storage_debug=False, **kwargs):
TempStorage.__init__(self, basename=ixname, debug=storage_debug,
**kwargs)
self.schema = schema
def __enter__(self):
fstore = TempStorage.__enter__(self)
return fstore.create_index(self.schema, indexname=self.basename)
def is_abstract_method(attr):
"""Returns True if the given object has __isabstractmethod__ == True.
"""
return (hasattr(attr, "__isabstractmethod__")
and getattr(attr, "__isabstractmethod__"))
def check_abstract_methods(base, subclass):
"""Raises AssertionError if ``subclass`` does not override a method on
``base`` that is marked as an abstract method.
"""
for attrname in dir(base):
if attrname.startswith("_"):
continue
attr = getattr(base, attrname)
if is_abstract_method(attr):
oattr = getattr(subclass, attrname)
if is_abstract_method(oattr):
raise Exception("%s.%s not overridden"
% (subclass.__name__, attrname))
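# Hedged usage sketch (names are illustrative, not from this module): given a
# base class with an abstract method, the check raises if a subclass fails to
# override it.
#
#   from abc import ABCMeta, abstractmethod
#
#   class Base(object):
#       __metaclass__ = ABCMeta
#       @abstractmethod
#       def read(self):
#           pass
#
#   check_abstract_methods(Base, MySubclass)  # raises unless read() is overridden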
@contextmanager
def timing(name=None):
t = now()
yield
t = now() - t
print("%s: %0.06f s" % (name or '', t))
|
wangjun/pyload
|
refs/heads/stable
|
module/plugins/crypter/QuickshareCzFolder.py
|
3
|
# -*- coding: utf-8 -*-
import re
from module.plugins.Crypter import Crypter
class QuickshareCzFolder(Crypter):
__name__ = "QuickshareCzFolder"
__type__ = "crypter"
__pattern__ = r"http://(www\.)?quickshare.cz/slozka-\d+.*"
__version__ = "0.1"
__description__ = """Quickshare.cz Folder Plugin"""
__author_name__ = ("zoidberg")
__author_mail__ = ("zoidberg@mujmail.cz")
FOLDER_PATTERN = r'<textarea[^>]*>(.*?)</textarea>'
    LINK_PATTERN = r'(http://www\.quickshare\.cz/\S+)'
def decrypt(self, pyfile):
html = self.load(self.pyfile.url)
new_links = []
found = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
if found is None:
self.fail("Parse error (FOLDER)")
new_links.extend(re.findall(self.LINK_PATTERN, found.group(1)))
if new_links:
self.core.files.addLinks(new_links, self.pyfile.package().id)
else:
self.fail('Could not extract any links')
|
testvidya11/ejrf
|
refs/heads/master
|
questionnaire/forms/answers.py
|
1
|
from django import forms
from django.forms.util import ErrorDict
from django.forms import ModelForm, ModelChoiceField
from django.utils.html import format_html
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from questionnaire.models import NumericalAnswer, TextAnswer, DateAnswer, MultiChoiceAnswer, AnswerGroup
class AnswerForm(ModelForm):
def __init__(self, *args, **kwargs):
super(AnswerForm, self).__init__(*args, **kwargs)
self.question = self._get_question(kwargs)
self.fields['response'].required = self.question.is_required
self._initial = self._set_initial(kwargs)
self.is_editing = False
self._set_instance()
self.question_group = self._initial['group'] if self._initial else None
def _set_initial(self, kwargs):
initial = kwargs['initial'] if 'initial' in kwargs else {}
        if self.data and 'response' in self.data.keys():
if 'response' in initial.keys():
del initial['response']
return initial
def _set_instance(self):
if 'answer' in self._initial:
self.is_editing = True
self.instance = self._initial['answer']
def show_is_required_errors(self):
if self.question.is_required and not self.data and not self._initial.get('response', None):
self._errors = self._errors or ErrorDict()
self._errors['response'] = self.error_class(['This field is required.'])
def save(self, commit=True, *args, **kwargs):
if self.is_editing:
return super(AnswerForm, self).save(commit=commit, *args, **kwargs)
return self._create_new_answer(*args, **kwargs)
def _create_new_answer(self, *args, **kwargs):
answer = super(AnswerForm, self).save(commit=False, *args, **kwargs)
self._add_extra_attributes_to(answer)
answer.save()
self._add_to_answer_group(answer)
return answer
def _add_extra_attributes_to(self, answer):
for attribute in self._initial.keys():
setattr(answer, attribute, self._initial[attribute])
def _add_to_answer_group(self, answer):
answer_group = AnswerGroup.objects.get_or_create(grouped_question=self.question_group)[0]
answer_group.answer.add(answer)
def _get_question(self, kwargs):
return kwargs['initial'].get('question', None)
class NumericalAnswerForm(AnswerForm):
class Meta:
model = NumericalAnswer
exclude = ('question', 'status', 'country', 'version', 'code')
class TextAnswerForm(AnswerForm):
response = forms.CharField(widget=forms.Textarea)
class Meta:
model = TextAnswer
exclude = ('question', 'status', 'country', 'version', 'code')
class DateAnswerForm(AnswerForm):
class Meta:
model = DateAnswer
exclude = ('question', 'status', 'country', 'version', 'code')
widgets = {
'response': forms.DateInput(attrs={'class': 'form-control datetimepicker', 'data-format':'YYYY-MM-DD'})
}
class MultiChoiceAnswerSelectWidget(forms.Select):
def __init__(self, attrs=None, choices=(), question_options=None):
super(MultiChoiceAnswerSelectWidget, self).__init__(attrs, choices)
self.question_options = question_options
def render_option(self, selected_choices, option_value, option_label):
option_value = force_text(option_value)
data_instruction = ''
if option_value:
data_instruction = mark_safe(' data-instructions="%s"' % self.question_options.get(id=int(option_value)).instructions)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
else:
selected_html = ''
return format_html('<option value="{0}"{1}{2}>{3}</option>',
option_value,
selected_html,
data_instruction,
force_text(option_label))
class MultiChoiceAnswerForm(AnswerForm):
response = ModelChoiceField(queryset=None, widget=forms.Select())
def __init__(self, *args, **kwargs):
super(MultiChoiceAnswerForm, self).__init__(*args, **kwargs)
query_set = self._get_response_choices(kwargs)
self.fields['response'].widget = self._get_response_widget(query_set)
self.fields['response'].queryset = query_set
self.fields['response'].empty_label = self._set_response_label(query_set)
def _set_response_label(self, query_set):
if self.widget_is_radio_button(query_set):
return None
return "Choose One"
def widget_is_radio_button(self, query_set):
return query_set.count() <= 2 or query_set.filter(text='Yes').exists() or query_set.filter(text='Male').exists()
def _get_response_widget(self, query_set):
if self.widget_is_radio_button(query_set):
return forms.RadioSelect()
if query_set.exclude(instructions=None).exists():
return MultiChoiceAnswerSelectWidget(question_options=query_set)
return forms.Select()
def _get_response_choices(self, kwargs):
return self.question.options.all()
class Meta:
model = MultiChoiceAnswer
exclude = ('question', 'status', 'country', 'version', 'code')
|
TeamTwisted/external_chromium_org
|
refs/heads/opti-5.1
|
native_client_sdk/src/tools/tests/chrome_mock.py
|
107
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import sys
import time
import urllib2
def PrintAndFlush(s):
print s
sys.stdout.flush()
def main(args):
parser = optparse.OptionParser(usage='%prog [options] <URL to load>')
parser.add_option('--post', help='POST to URL.', dest='post',
action='store_true')
parser.add_option('--get', help='GET to URL.', dest='get',
action='store_true')
parser.add_option('--sleep',
help='Number of seconds to sleep after reading URL',
dest='sleep', default=0)
parser.add_option('--expect-to-be-killed', help='If set, the script will warn'
' if it isn\'t killed before it finishes sleeping.',
dest='expect_to_be_killed', action='store_true')
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error('Expected URL to load.')
PrintAndFlush('Starting %s.' % sys.argv[0])
if options.post:
urllib2.urlopen(args[0], data='').read()
elif options.get:
urllib2.urlopen(args[0]).read()
else:
# Do nothing but wait to be killed.
pass
time.sleep(float(options.sleep))
if options.expect_to_be_killed:
PrintAndFlush('Done sleeping. Expected to be killed.')
sys.exit(0)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/completion/matMul.after.py
|
79
|
class C:
def __matmul__(self, other):
|
gauravbose/digital-menu
|
refs/heads/master
|
digimenu2/django/conf/locale/ko/formats.py
|
115
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y년 n월 j일'
TIME_FORMAT = 'A g:i'
DATETIME_FORMAT = 'Y년 n월 j일 g:i A'
YEAR_MONTH_FORMAT = 'Y년 F월'
MONTH_DAY_FORMAT = 'F월 j일'
SHORT_DATE_FORMAT = 'Y-n-j.'
SHORT_DATETIME_FORMAT = 'Y-n-j H:i'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
'%Y년 %m월 %d일', # '2006년 10월 25일', with localized suffix.
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
'%H시 %M분 %S초', # '14시 30분 59초'
'%H시 %M분', # '14시 30분'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
'%Y년 %m월 %d일 %H시 %M분 %S초', # '2006년 10월 25일 14시 30분 59초'
'%Y년 %m월 %d일 %H시 %M분', # '2006년 10월 25일 14시 30분'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
AlexanderFabisch/scikit-learn
|
refs/heads/master
|
sklearn/externals/joblib/testing.py
|
330
|
"""
Helper for testing.
"""
import sys
import warnings
import os.path
def warnings_to_stdout():
""" Redirect all warnings to stdout.
"""
showwarning_orig = warnings.showwarning
def showwarning(msg, cat, fname, lno, file=None, line=0):
        showwarning_orig(msg, cat, os.path.basename(fname), lno, sys.stdout)
warnings.showwarning = showwarning
#warnings.simplefilter('always')
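# Usage sketch:
#
#   warnings_to_stdout()
#   warnings.warn("now printed to stdout with the bare filename")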
|
sakset/getyourdata
|
refs/heads/master
|
getyourdata/data_request/templatetags/__init__.py
|
12133432
| |
evernote/pootle
|
refs/heads/master
|
pootle/apps/evernote_auth/migrations/__init__.py
|
12133432
| |
jymannob/CouchPotatoServer
|
refs/heads/develop
|
libs/rsa/_version200.py
|
177
|
"""RSA module
Module for calculating large primes, and RSA encryption, decryption,
signing and verification. Includes generating public and private keys.
WARNING: this implementation does not use random padding, compression of the
cleartext input to prevent repetitions, or other common security improvements.
Use with care.
"""
__author__ = "Sybren Stuvel, Marloes de Boer, Ivo Tamboer, and Barry Mead"
__date__ = "2010-02-08"
__version__ = '2.0'
import math
import os
import random
import sys
import types
from rsa._compat import byte
# Display a warning that this insecure version is imported.
import warnings
warnings.warn('Insecure version of the RSA module is imported as %s' % __name__)
def bit_size(number):
"""Returns the number of bits required to hold a specific long number"""
return int(math.ceil(math.log(number,2)))
def gcd(p, q):
"""Returns the greatest common divisor of p and q
>>> gcd(48, 180)
12
"""
    # Iterative version is faster and uses much less stack space
while q != 0:
if p < q: (p,q) = (q,p)
(p,q) = (q, p % q)
return p
def bytes2int(bytes):
"""Converts a list of bytes or a string to an integer
>>> (((128 * 256) + 64) * 256) + 15
8405007
>>> l = [128, 64, 15]
>>> bytes2int(l) #same as bytes2int('\x80@\x0f')
8405007
"""
if not (type(bytes) is types.ListType or type(bytes) is types.StringType):
raise TypeError("You must pass a string or a list")
# Convert byte stream to integer
integer = 0
for byte in bytes:
integer *= 256
if type(byte) is types.StringType: byte = ord(byte)
integer += byte
return integer
def int2bytes(number):
"""
Converts a number to a string of bytes
"""
if not (type(number) is types.LongType or type(number) is types.IntType):
raise TypeError("You must pass a long or an int")
string = ""
while number > 0:
string = "%s%s" % (byte(number & 0xFF), string)
number /= 256
return string
def to64(number):
"""Converts a number in the range of 0 to 63 into base 64 digit
character in the range of '0'-'9', 'A'-'Z', 'a'-'z','-','_'.
>>> to64(10)
'A'
"""
if not (type(number) is types.LongType or type(number) is types.IntType):
raise TypeError("You must pass a long or an int")
if 0 <= number <= 9: #00-09 translates to '0' - '9'
return byte(number + 48)
if 10 <= number <= 35:
return byte(number + 55) #10-35 translates to 'A' - 'Z'
if 36 <= number <= 61:
return byte(number + 61) #36-61 translates to 'a' - 'z'
if number == 62: # 62 translates to '-' (minus)
return byte(45)
if number == 63: # 63 translates to '_' (underscore)
return byte(95)
raise ValueError('Invalid Base64 value: %i' % number)
def from64(number):
"""Converts an ordinal character value in the range of
0-9,A-Z,a-z,-,_ to a number in the range of 0-63.
>>> from64(49)
1
"""
if not (type(number) is types.LongType or type(number) is types.IntType):
raise TypeError("You must pass a long or an int")
if 48 <= number <= 57: #ord('0') - ord('9') translates to 0-9
return(number - 48)
if 65 <= number <= 90: #ord('A') - ord('Z') translates to 10-35
return(number - 55)
if 97 <= number <= 122: #ord('a') - ord('z') translates to 36-61
return(number - 61)
if number == 45: #ord('-') translates to 62
return(62)
if number == 95: #ord('_') translates to 63
return(63)
raise ValueError('Invalid Base64 value: %i' % number)
def int2str64(number):
"""Converts a number to a string of base64 encoded characters in
the range of '0'-'9','A'-'Z,'a'-'z','-','_'.
>>> int2str64(123456789)
'7MyqL'
"""
if not (type(number) is types.LongType or type(number) is types.IntType):
raise TypeError("You must pass a long or an int")
string = ""
while number > 0:
string = "%s%s" % (to64(number & 0x3F), string)
number /= 64
return string
def str642int(string):
"""Converts a base64 encoded string into an integer.
    The chars of this string are in the range '0'-'9','A'-'Z','a'-'z','-','_'
>>> str642int('7MyqL')
123456789
"""
if not (type(string) is types.ListType or type(string) is types.StringType):
raise TypeError("You must pass a string or a list")
integer = 0
for byte in string:
integer *= 64
if type(byte) is types.StringType: byte = ord(byte)
integer += from64(byte)
return integer
def read_random_int(nbits):
"""Reads a random integer of approximately nbits bits rounded up
to whole bytes"""
nbytes = int(math.ceil(nbits/8.))
randomdata = os.urandom(nbytes)
return bytes2int(randomdata)
def randint(minvalue, maxvalue):
"""Returns a random integer x with minvalue <= x <= maxvalue"""
# Safety - get a lot of random data even if the range is fairly
# small
min_nbits = 32
# The range of the random numbers we need to generate
range = (maxvalue - minvalue) + 1
# Which is this number of bytes
rangebytes = ((bit_size(range) + 7) / 8)
# Convert to bits, but make sure it's always at least min_nbits*2
rangebits = max(rangebytes * 8, min_nbits * 2)
# Take a random number of bits between min_nbits and rangebits
nbits = random.randint(min_nbits, rangebits)
return (read_random_int(nbits) % range) + minvalue
def jacobi(a, b):
"""Calculates the value of the Jacobi symbol (a/b)
where both a and b are positive integers, and b is odd
"""
if a == 0: return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
a, b = b % a, a
else:
if (((b * b) - 1) >> 3) & 1:
result = -result
a >>= 1
if a == 0: return 0
return result
def jacobi_witness(x, n):
"""Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
"""
j = jacobi(x, n) % n
f = pow(x, (n-1)/2, n)
if j == f: return False
return True
def randomized_primality_testing(n, k):
"""Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
Returns False if the number is composite, and True if it's
probably prime.
"""
    # 50% of Jacobi witnesses can report compositeness of non-prime numbers
for i in range(k):
x = randint(1, n-1)
if jacobi_witness(x, n): return False
return True
def is_prime(number):
"""Returns True if the number is prime, and False otherwise.
>>> is_prime(42)
0
>>> is_prime(41)
1
"""
if randomized_primality_testing(number, 6):
# Prime, according to Jacobi
return True
# Not prime
return False
def getprime(nbits):
"""Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
other words: nbits is rounded up to whole bytes.
    >>> p = getprime(8)
    >>> is_prime(p-1)
    False
    >>> is_prime(p)
    True
    >>> is_prime(p+1)
    False
"""
while True:
integer = read_random_int(nbits)
# Make sure it's odd
integer |= 1
# Test for primeness
if is_prime(integer): break
# Retry if not prime
return integer
def are_relatively_prime(a, b):
"""Returns True if a and b are relatively prime, and False if they
are not.
    >>> are_relatively_prime(2, 3)
    True
    >>> are_relatively_prime(2, 4)
    False
"""
d = gcd(a, b)
return (d == 1)
def find_p_q(nbits):
"""Returns a tuple of two different primes of nbits bits"""
pbits = nbits + (nbits/16) #Make sure that p and q aren't too close
qbits = nbits - (nbits/16) #or the factoring programs can factor n
p = getprime(pbits)
while True:
q = getprime(qbits)
#Make sure p and q are different.
if not q == p: break
return (p, q)
def extended_gcd(a, b):
"""Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
"""
    # r = gcd(a,b) i = multiplicative inverse of a mod b
    #     or      j = multiplicative inverse of b mod a
    # Neg return values for i or j are made positive mod b or a respectively
    # Iterative version is faster and uses much less stack space
x = 0
y = 1
lx = 1
ly = 0
oa = a #Remember original a/b to remove
ob = b #negative values from return results
while b != 0:
q = long(a/b)
(a, b) = (b, a % b)
(x, lx) = ((lx - (q * x)),x)
(y, ly) = ((ly - (q * y)),y)
if (lx < 0): lx += ob #If neg wrap modulo orignal b
if (ly < 0): ly += oa #If neg wrap modulo orignal a
return (a, lx, ly) #Return only positive values
# Main function: calculate encryption and decryption keys
def calculate_keys(p, q, nbits):
"""Calculates an encryption and a decryption key for p and q, and
returns them as a tuple (e, d)"""
n = p * q
phi_n = (p-1) * (q-1)
while True:
# Make sure e has enough bits so we ensure "wrapping" through
# modulo n
e = max(65537,getprime(nbits/4))
if are_relatively_prime(e, n) and are_relatively_prime(e, phi_n): break
(d, i, j) = extended_gcd(e, phi_n)
if not d == 1:
raise Exception("e (%d) and phi_n (%d) are not relatively prime" % (e, phi_n))
if (i < 0):
raise Exception("New extended_gcd shouldn't return negative values")
if not (e * i) % phi_n == 1:
raise Exception("e (%d) and i (%d) are not mult. inv. modulo phi_n (%d)" % (e, i, phi_n))
return (e, i)
def gen_keys(nbits):
"""Generate RSA keys of nbits bits. Returns (p, q, e, d).
Note: this can take a long time, depending on the key size.
"""
(p, q) = find_p_q(nbits)
(e, d) = calculate_keys(p, q, nbits)
return (p, q, e, d)
def newkeys(nbits):
"""Generates public and private keys, and returns them as (pub,
priv).
The public key consists of a dict {e: ..., , n: ....). The private
key consists of a dict {d: ...., p: ...., q: ....).
"""
nbits = max(9,nbits) # Don't let nbits go below 9 bits
(p, q, e, d) = gen_keys(nbits)
return ( {'e': e, 'n': p*q}, {'d': d, 'p': p, 'q': q} )
def encrypt_int(message, ekey, n):
"""Encrypts a message using encryption key 'ekey', working modulo n"""
if type(message) is types.IntType:
message = long(message)
if not type(message) is types.LongType:
raise TypeError("You must pass a long or int")
if message < 0 or message > n:
raise OverflowError("The message is too long")
    #Note: bit exponents start at zero (bit counts start at 1); this is correct
safebit = bit_size(n) - 2 #compute safe bit (MSB - 1)
message += (1 << safebit) #add safebit to ensure folding
return pow(message, ekey, n)
def decrypt_int(cyphertext, dkey, n):
"""Decrypts a cypher text using the decryption key 'dkey', working
modulo n"""
message = pow(cyphertext, dkey, n)
safebit = bit_size(n) - 2 #compute safe bit (MSB - 1)
message -= (1 << safebit) #remove safebit before decode
return message
def encode64chops(chops):
"""base64encodes chops and combines them into a ',' delimited string"""
chips = [] #chips are character chops
for value in chops:
chips.append(int2str64(value))
#delimit chops with comma
encoded = ','.join(chips)
return encoded
def decode64chops(string):
"""base64decodes and makes a ',' delimited string into chops"""
chips = string.split(',') #split chops at commas
chops = []
for string in chips: #make char chops (chips) into chops
chops.append(str642int(string))
return chops
def chopstring(message, key, n, funcref):
"""Chops the 'message' into integers that fit into n,
leaving room for a safebit to be added to ensure that all
messages fold during exponentiation. The MSB of the number n
    is not independent modulo n (setting it could cause overflow), so
use the next lower bit for the safebit. Therefore reserve 2-bits
in the number n for non-data bits. Calls specified encryption
function for each chop.
Used by 'encrypt' and 'sign'.
"""
msglen = len(message)
mbits = msglen * 8
#Set aside 2-bits so setting of safebit won't overflow modulo n.
nbits = bit_size(n) - 2 # leave room for safebit
nbytes = nbits / 8
blocks = msglen / nbytes
if msglen % nbytes > 0:
blocks += 1
cypher = []
for bindex in range(blocks):
offset = bindex * nbytes
block = message[offset:offset+nbytes]
value = bytes2int(block)
cypher.append(funcref(value, key, n))
return encode64chops(cypher) #Encode encrypted ints to base64 strings
def gluechops(string, key, n, funcref):
"""Glues chops back together into a string. calls
funcref(integer, key, n) for each chop.
Used by 'decrypt' and 'verify'.
"""
message = ""
chops = decode64chops(string) #Decode base64 strings into integer chops
for cpart in chops:
mpart = funcref(cpart, key, n) #Decrypt each chop
message += int2bytes(mpart) #Combine decrypted strings into a msg
return message
def encrypt(message, key):
"""Encrypts a string 'message' with the public key 'key'"""
if 'n' not in key:
raise Exception("You must use the public key with encrypt")
return chopstring(message, key['e'], key['n'], encrypt_int)
def sign(message, key):
"""Signs a string 'message' with the private key 'key'"""
if 'p' not in key:
raise Exception("You must use the private key with sign")
return chopstring(message, key['d'], key['p']*key['q'], encrypt_int)
def decrypt(cypher, key):
"""Decrypts a string 'cypher' with the private key 'key'"""
if 'p' not in key:
raise Exception("You must use the private key with decrypt")
return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)
def verify(cypher, key):
"""Verifies a string 'cypher' with the public key 'key'"""
if 'n' not in key:
raise Exception("You must use the public key with verify")
return gluechops(cypher, key['e'], key['n'], decrypt_int)
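# Hedged roundtrip sketch using only the functions above (key size and
# message are illustrative; remember the module's own security warning):
#
#   pub, priv = newkeys(64)
#   cypher = encrypt('hello', pub)
#   assert decrypt(cypher, priv) == 'hello'
#   signature = sign('hello', priv)
#   assert verify(signature, pub) == 'hello'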
# Do doctest if we're not imported
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify"]
|
mxOBS/deb-pkg_trusty_chromium-browser
|
refs/heads/master
|
tools/json_schema_compiler/json_schema.py
|
81
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import json_parse
def DeleteNodes(item, delete_key=None, matcher=None):
"""Deletes certain nodes in item, recursively. If |delete_key| is set, all
dicts with |delete_key| as an attribute are deleted. If a callback is passed
as |matcher|, |DeleteNodes| will delete all dicts for which matcher(dict)
returns True.
"""
assert (delete_key is not None) != (matcher is not None)
def ShouldDelete(thing):
return json_parse.IsDict(thing) and (
delete_key is not None and delete_key in thing or
matcher is not None and matcher(thing))
if json_parse.IsDict(item):
toDelete = []
for key, value in item.items():
if ShouldDelete(value):
toDelete.append(key)
else:
DeleteNodes(value, delete_key, matcher)
for key in toDelete:
del item[key]
elif type(item) == list:
item[:] = [DeleteNodes(thing, delete_key, matcher)
for thing in item if not ShouldDelete(thing)]
return item
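# Example sketch (inputs are illustrative): deleting by key removes any dict
# that carries the key, recursively.
#
#   DeleteNodes({'a': {'nodoc': True}, 'b': 2}, delete_key='nodoc')
#   # -> {'b': 2}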
def Load(filename):
with open(filename, 'r') as handle:
schemas = json_parse.Parse(handle.read())
return schemas
# A dictionary mapping |filename| to the object resulting from loading the JSON
# at |filename|.
_cache = {}
def CachedLoad(filename):
"""Equivalent to Load(filename), but caches results for subsequent calls"""
if filename not in _cache:
_cache[filename] = Load(filename)
# Return a copy of the object so that any changes a caller makes won't affect
# the next caller.
return copy.deepcopy(_cache[filename])
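# Example: repeated calls with the same filename hit the cache but receive
# independent copies, so callers cannot poison each other ('api.json' is a
# hypothetical path):
#
#   a = CachedLoad('api.json')
#   b = CachedLoad('api.json')
#   assert a == b and a is not b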
|
watonyweng/nova
|
refs/heads/master
|
nova/tests/unit/objects/test_instance_action.py
|
52
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
import mock
from oslo_utils import timeutils
import six
from nova import db
from nova.objects import instance_action
from nova import test
from nova.tests.unit.objects import test_objects
NOW = timeutils.utcnow().replace(microsecond=0)
fake_action = {
'created_at': NOW,
'deleted_at': None,
'updated_at': None,
'deleted': False,
'id': 123,
'action': 'fake-action',
'instance_uuid': 'fake-uuid',
'request_id': 'fake-request',
'user_id': 'fake-user',
'project_id': 'fake-project',
'start_time': NOW,
'finish_time': None,
'message': 'foo',
}
fake_event = {
'created_at': NOW,
'deleted_at': None,
'updated_at': None,
'deleted': False,
'id': 123,
'event': 'fake-event',
'action_id': 123,
'start_time': NOW,
'finish_time': None,
'result': 'fake-result',
'traceback': 'fake-tb',
}
class _TestInstanceActionObject(object):
@mock.patch.object(db, 'action_get_by_request_id')
def test_get_by_request_id(self, mock_get):
context = self.context
mock_get.return_value = fake_action
action = instance_action.InstanceAction.get_by_request_id(
context, 'fake-uuid', 'fake-request')
self.compare_obj(action, fake_action)
mock_get.assert_called_once_with(context,
'fake-uuid', 'fake-request')
def test_pack_action_start(self):
values = instance_action.InstanceAction.pack_action_start(
self.context, 'fake-uuid', 'fake-action')
self.assertEqual(values['request_id'], self.context.request_id)
self.assertEqual(values['user_id'], self.context.user_id)
self.assertEqual(values['project_id'], self.context.project_id)
self.assertEqual(values['instance_uuid'], 'fake-uuid')
self.assertEqual(values['action'], 'fake-action')
self.assertEqual(values['start_time'].replace(tzinfo=None),
self.context.timestamp)
def test_pack_action_finish(self):
timeutils.set_time_override(override_time=NOW)
values = instance_action.InstanceAction.pack_action_finish(
self.context, 'fake-uuid')
self.assertEqual(values['request_id'], self.context.request_id)
self.assertEqual(values['instance_uuid'], 'fake-uuid')
self.assertEqual(values['finish_time'].replace(tzinfo=None), NOW)
@mock.patch.object(db, 'action_start')
def test_action_start(self, mock_start):
test_class = instance_action.InstanceAction
expected_packed_values = test_class.pack_action_start(
self.context, 'fake-uuid', 'fake-action')
mock_start.return_value = fake_action
action = instance_action.InstanceAction.action_start(
self.context, 'fake-uuid', 'fake-action', want_result=True)
mock_start.assert_called_once_with(self.context,
expected_packed_values)
self.compare_obj(action, fake_action)
@mock.patch.object(db, 'action_start')
def test_action_start_no_result(self, mock_start):
test_class = instance_action.InstanceAction
expected_packed_values = test_class.pack_action_start(
self.context, 'fake-uuid', 'fake-action')
mock_start.return_value = fake_action
action = instance_action.InstanceAction.action_start(
self.context, 'fake-uuid', 'fake-action', want_result=False)
mock_start.assert_called_once_with(self.context,
expected_packed_values)
self.assertIsNone(action)
@mock.patch.object(db, 'action_finish')
def test_action_finish(self, mock_finish):
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceAction
expected_packed_values = test_class.pack_action_finish(
self.context, 'fake-uuid')
mock_finish.return_value = fake_action
action = instance_action.InstanceAction.action_finish(
self.context, 'fake-uuid', want_result=True)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.compare_obj(action, fake_action)
@mock.patch.object(db, 'action_finish')
def test_action_finish_no_result(self, mock_finish):
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceAction
expected_packed_values = test_class.pack_action_finish(
self.context, 'fake-uuid')
mock_finish.return_value = fake_action
action = instance_action.InstanceAction.action_finish(
self.context, 'fake-uuid', want_result=False)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.assertIsNone(action)
@mock.patch.object(db, 'action_finish')
@mock.patch.object(db, 'action_start')
def test_finish(self, mock_start, mock_finish):
timeutils.set_time_override(override_time=NOW)
expected_packed_action_start = {
'request_id': self.context.request_id,
'user_id': self.context.user_id,
'project_id': self.context.project_id,
'instance_uuid': 'fake-uuid',
'action': 'fake-action',
'start_time': self.context.timestamp,
}
expected_packed_action_finish = {
'request_id': self.context.request_id,
'instance_uuid': 'fake-uuid',
'finish_time': NOW,
}
mock_start.return_value = fake_action
mock_finish.return_value = fake_action
action = instance_action.InstanceAction.action_start(
self.context, 'fake-uuid', 'fake-action')
action.finish()
mock_start.assert_called_once_with(self.context,
expected_packed_action_start)
mock_finish.assert_called_once_with(self.context,
expected_packed_action_finish)
self.compare_obj(action, fake_action)
@mock.patch.object(db, 'actions_get')
def test_get_list(self, mock_get):
fake_actions = [dict(fake_action, id=1234),
dict(fake_action, id=5678)]
mock_get.return_value = fake_actions
obj_list = instance_action.InstanceActionList.get_by_instance_uuid(
self.context, 'fake-uuid')
for index, action in enumerate(obj_list):
self.compare_obj(action, fake_actions[index])
mock_get.assert_called_once_with(self.context, 'fake-uuid')
class TestInstanceActionObject(test_objects._LocalTest,
_TestInstanceActionObject):
pass
class TestRemoteInstanceActionObject(test_objects._RemoteTest,
_TestInstanceActionObject):
pass
class _TestInstanceActionEventObject(object):
@mock.patch.object(db, 'action_event_get_by_id')
def test_get_by_id(self, mock_get):
mock_get.return_value = fake_event
event = instance_action.InstanceActionEvent.get_by_id(
self.context, 'fake-action-id', 'fake-event-id')
self.compare_obj(event, fake_event)
mock_get.assert_called_once_with(self.context,
'fake-action-id', 'fake-event-id')
@mock.patch.object(db, 'action_event_start')
def test_event_start(self, mock_start):
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceActionEvent
expected_packed_values = test_class.pack_action_event_start(
self.context, 'fake-uuid', 'fake-event')
mock_start.return_value = fake_event
event = instance_action.InstanceActionEvent.event_start(
self.context, 'fake-uuid', 'fake-event', want_result=True)
mock_start.assert_called_once_with(self.context,
expected_packed_values)
self.compare_obj(event, fake_event)
@mock.patch.object(db, 'action_event_start')
def test_event_start_no_result(self, mock_start):
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceActionEvent
expected_packed_values = test_class.pack_action_event_start(
self.context, 'fake-uuid', 'fake-event')
mock_start.return_value = fake_event
event = instance_action.InstanceActionEvent.event_start(
self.context, 'fake-uuid', 'fake-event', want_result=False)
mock_start.assert_called_once_with(self.context,
expected_packed_values)
self.assertIsNone(event)
@mock.patch.object(db, 'action_event_finish')
def test_event_finish(self, mock_finish):
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceActionEvent
expected_packed_values = test_class.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event')
expected_packed_values['finish_time'] = timeutils.utcnow()
mock_finish.return_value = fake_event
event = instance_action.InstanceActionEvent.event_finish(
self.context, 'fake-uuid', 'fake-event', want_result=True)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.compare_obj(event, fake_event)
@mock.patch.object(db, 'action_event_finish')
def test_event_finish_no_result(self, mock_finish):
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceActionEvent
expected_packed_values = test_class.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event')
expected_packed_values['finish_time'] = timeutils.utcnow()
mock_finish.return_value = fake_event
event = instance_action.InstanceActionEvent.event_finish(
self.context, 'fake-uuid', 'fake-event', want_result=False)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.assertIsNone(event)
@mock.patch.object(traceback, 'format_tb')
@mock.patch.object(db, 'action_event_finish')
def test_event_finish_with_failure(self, mock_finish, mock_tb):
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceActionEvent
expected_packed_values = test_class.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
expected_packed_values['finish_time'] = timeutils.utcnow()
mock_finish.return_value = fake_event
event = test_class.event_finish_with_failure(
self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb',
want_result=True)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.compare_obj(event, fake_event)
@mock.patch.object(traceback, 'format_tb')
@mock.patch.object(db, 'action_event_finish')
def test_event_finish_with_failure_legacy(self, mock_finish, mock_tb):
# Tests that exc_tb is serialized when it's not a string type.
mock_tb.return_value = 'fake-tb'
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceActionEvent
expected_packed_values = test_class.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
expected_packed_values['finish_time'] = timeutils.utcnow()
mock_finish.return_value = fake_event
fake_tb = mock.sentinel.fake_tb
event = test_class.event_finish_with_failure(
self.context, 'fake-uuid', 'fake-event', exc_val='val',
exc_tb=fake_tb, want_result=True)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.compare_obj(event, fake_event)
mock_tb.assert_called_once_with(fake_tb)
@mock.patch.object(db, 'action_event_finish')
def test_event_finish_with_failure_legacy_unicode(self, mock_finish):
# Tests that traceback.format_tb is not called when exc_tb is unicode.
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceActionEvent
expected_packed_values = test_class.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event', 'val',
six.text_type('fake-tb'))
expected_packed_values['finish_time'] = timeutils.utcnow()
mock_finish.return_value = fake_event
event = test_class.event_finish_with_failure(
self.context, 'fake-uuid', 'fake-event', exc_val='val',
exc_tb=six.text_type('fake-tb'), want_result=True)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.compare_obj(event, fake_event)
@mock.patch.object(traceback, 'format_tb')
@mock.patch.object(db, 'action_event_finish')
def test_event_finish_with_failure_no_result(self, mock_finish, mock_tb):
# Tests that traceback.format_tb is not called when exc_tb is a str
# and want_result is False, so no event should come back.
mock_tb.return_value = 'fake-tb'
timeutils.set_time_override(override_time=NOW)
test_class = instance_action.InstanceActionEvent
expected_packed_values = test_class.pack_action_event_finish(
self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
expected_packed_values['finish_time'] = timeutils.utcnow()
mock_finish.return_value = fake_event
event = test_class.event_finish_with_failure(
self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb',
want_result=False)
mock_finish.assert_called_once_with(self.context,
expected_packed_values)
self.assertIsNone(event)
self.assertFalse(mock_tb.called)
@mock.patch.object(db, 'action_events_get')
def test_get_by_action(self, mock_get):
fake_events = [dict(fake_event, id=1234),
dict(fake_event, id=5678)]
mock_get.return_value = fake_events
obj_list = instance_action.InstanceActionEventList.get_by_action(
self.context, 'fake-action-id')
for index, event in enumerate(obj_list):
self.compare_obj(event, fake_events[index])
mock_get.assert_called_once_with(self.context, 'fake-action-id')
@mock.patch('nova.objects.instance_action.InstanceActionEvent.'
'pack_action_event_finish')
@mock.patch('traceback.format_tb')
def test_event_finish_with_failure_serialized(self, mock_format,
mock_pack):
mock_format.return_value = 'traceback'
mock_pack.side_effect = test.TestingException
self.assertRaises(
test.TestingException,
instance_action.InstanceActionEvent.event_finish_with_failure,
self.context, 'fake-uuid', 'fake-event',
exc_val=mock.sentinel.exc_val,
exc_tb=mock.sentinel.exc_tb)
mock_pack.assert_called_once_with(self.context, 'fake-uuid',
'fake-event',
exc_val=str(mock.sentinel.exc_val),
exc_tb='traceback')
mock_format.assert_called_once_with(mock.sentinel.exc_tb)
class TestInstanceActionEventObject(test_objects._LocalTest,
_TestInstanceActionEventObject):
pass
class TestRemoteInstanceActionEventObject(test_objects._RemoteTest,
_TestInstanceActionEventObject):
pass
|
blueskycoco/sq-linux
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
DTUWindEnergy/Python4WindEnergy
|
refs/heads/master
|
lesson 7/UnitTesting/fibmodule.py
|
1
|
"""
Functions to compute Fibonacci sequences
"""
import numpy as np
from numpy.testing import assert_allclose
def fib(N):
"""
Compute the first N Fibonacci numbers
Parameters
----------
N : integer
The number of Fibonacci numbers to compute
Returns
-------
x : np.ndarray
the length-N array containing the first N
Fibonacci numbers.
Notes
-----
This is a pure Python implementation. For large N,
consider a Cython implementation
Examples
--------
>>> fib(5)
array([ 0., 1., 1., 2., 3.])
"""
x = np.zeros(N, dtype=float)
for i in range(N):
if i == 0:
x[i] = 0
elif i == 1:
x[i] = 1
else:
x[i] = x[i - 1] + x[i - 2]
return x
def test_first_ten():
nums = fib(10)
    assert_allclose(nums,
                    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34])
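# A hedged alternative sketch (not part of the original module): the
# docstring defers to Cython for large N, but a vectorized Binet closed
# form is another option. float64 keeps it exact only up to roughly
# N < 71, so treat this purely as an illustration.
def fib_binet(N):
    n = np.arange(N)
    phi = (1 + np.sqrt(5)) / 2
    return np.round(phi ** n / np.sqrt(5))
def test_binet_matches_iterative():
    assert_allclose(fib_binet(10), fib(10))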
|
mbareta/edx-platform-ft
|
refs/heads/open-release/eucalyptus.master
|
lms/djangoapps/commerce/tests/__init__.py
|
25
|
# -*- coding: utf-8 -*-
""" Commerce app tests package. """
import datetime
import json
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from freezegun import freeze_time
import httpretty
import jwt
import mock
from edx_rest_api_client import auth
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from student.tests.factories import UserFactory
JSON = 'application/json'
TEST_PUBLIC_URL_ROOT = 'http://www.example.com'
TEST_API_URL = 'http://www-internal.example.com/api'
TEST_API_SIGNING_KEY = 'edx'
TEST_BASKET_ID = 7
TEST_ORDER_NUMBER = '100004'
TEST_PAYMENT_DATA = {
'payment_processor_name': 'test-processor',
'payment_form_data': {},
'payment_page_url': 'http://example.com/pay',
}
@override_settings(ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY, ECOMMERCE_API_URL=TEST_API_URL)
class EdxRestApiClientTest(TestCase):
""" Tests to ensure the client is initialized properly. """
TEST_USER_EMAIL = 'test@example.com'
TEST_CLIENT_ID = 'test-client-id'
def setUp(self):
super(EdxRestApiClientTest, self).setUp()
self.user = UserFactory()
self.user.email = self.TEST_USER_EMAIL
self.user.save() # pylint: disable=no-member
@httpretty.activate
@freeze_time('2015-7-2')
@override_settings(JWT_AUTH={'JWT_ISSUER': 'http://example.com/oauth', 'JWT_EXPIRATION': 30})
def test_tracking_context(self):
"""
Ensure the tracking context is set up in the api client correctly and
automatically.
"""
# fake an ecommerce api request.
httpretty.register_uri(
httpretty.POST,
'{}/baskets/1/'.format(TEST_API_URL),
status=200, body='{}',
adding_headers={'Content-Type': JSON}
)
mock_tracker = mock.Mock()
mock_tracker.resolve_context = mock.Mock(return_value={'client_id': self.TEST_CLIENT_ID, 'ip': '127.0.0.1'})
with mock.patch('openedx.core.djangoapps.commerce.utils.tracker.get_tracker', return_value=mock_tracker):
ecommerce_api_client(self.user).baskets(1).post()
# make sure the request's JWT token payload included correct tracking context values.
actual_header = httpretty.last_request().headers['Authorization']
expected_payload = {
'username': self.user.username,
'full_name': self.user.profile.name,
'email': self.user.email,
'iss': settings.JWT_AUTH['JWT_ISSUER'],
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=settings.JWT_AUTH['JWT_EXPIRATION']),
'tracking_context': {
'lms_user_id': self.user.id, # pylint: disable=no-member
'lms_client_id': self.TEST_CLIENT_ID,
'lms_ip': '127.0.0.1',
},
}
expected_header = 'JWT {}'.format(jwt.encode(expected_payload, TEST_API_SIGNING_KEY))
self.assertEqual(actual_header, expected_header)
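    # Hedged note: the assertion above only holds because freeze_time pins
    # utcnow(), so the locally computed "exp" claim matches the one the
    # client put into the JWT.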
@httpretty.activate
def test_client_unicode(self):
"""
The client should handle json responses properly when they contain
unicode character data.
Regression test for ECOM-1606.
"""
expected_content = '{"result": "Préparatoire"}'
httpretty.register_uri(
httpretty.GET,
'{}/baskets/1/order/'.format(TEST_API_URL),
status=200, body=expected_content,
adding_headers={'Content-Type': JSON},
)
actual_object = ecommerce_api_client(self.user).baskets(1).order.get()
self.assertEqual(actual_object, {u"result": u"Préparatoire"})
def test_client_with_user_without_profile(self):
"""
Verify client initialize successfully for users having no profile.
"""
worker = User.objects.create_user(username='test_worker', email='test@example.com')
api_client = ecommerce_api_client(worker)
self.assertEqual(api_client._store['session'].auth.__dict__['username'], worker.username) # pylint: disable=protected-access
self.assertIsNone(api_client._store['session'].auth.__dict__['full_name']) # pylint: disable=protected-access
|
RafaelOrtiz/EbookReader
|
refs/heads/master
|
epubview/jobs.py
|
3
|
# Copyright 2009 One Laptop Per Child
# Author: Sayamindu Dasgupta <sayamindu@laptop.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gobject
import gtk
import widgets
import cairo
import math
import os.path
import BeautifulSoup
import epub
import threading
PAGE_WIDTH = 135
PAGE_HEIGHT = 216
def _pixel_to_mm(pixel, dpi):
inches = pixel / dpi
return int(inches / 0.03937)
def _mm_to_pixel(mm, dpi):
inches = mm * 0.03937
return int(inches * dpi)
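# Note: 0.03937 is inches per millimetre (1/25.4), so the two helpers
# above are inverse mm<->pixel conversions at a given dpi; for example
# _mm_to_pixel(PAGE_WIDTH, 96) comes out to roughly 510 pixels.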
class SearchThread(threading.Thread):
def __init__(self, obj):
threading.Thread.__init__(self)
self.obj = obj
self.stopthread = threading.Event()
def _start_search(self):
for entry in self.obj.flattoc:
if self.stopthread.isSet():
break
filepath = os.path.join(self.obj._document.get_basedir(), entry)
f = open(filepath)
if self._searchfile(f):
self.obj._matchfilelist.append(entry)
f.close()
gtk.gdk.threads_enter()
self.obj._finished = True
self.obj.emit('updated')
gtk.gdk.threads_leave()
return False
def _searchfile(self, fileobj):
soup = BeautifulSoup.BeautifulSoup(fileobj)
body = soup.find('body')
tags = body.findChildren(True)
for tag in tags:
            if tag.string is not None:
if tag.string.find(self.obj._text) > -1:
return True
return False
def run(self):
self._start_search()
def stop(self):
self.stopthread.set()
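# Overview of _JobPaginator below: each file is rendered in an off-screen
# WebView, and every logical page number is mapped in self._pagemap to a
# (file uri, scroll-offset fraction, page-length fraction) triple that is
# exposed through the get_* accessors further down.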
class _JobPaginator(gobject.GObject):
__gsignals__ = {
'paginated': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ([])),
}
def __init__(self, filelist):
gobject.GObject.__init__(self)
self._filelist = filelist
self._filedict = {}
self._pagemap = {}
self._bookheight = 0
self._count = 0
self._pagecount = 0
self._screen = gtk.gdk.screen_get_default()
self._old_fontoptions = self._screen.get_font_options()
options = cairo.FontOptions()
options.set_hint_style(cairo.HINT_STYLE_MEDIUM)
options.set_antialias(cairo.ANTIALIAS_GRAY)
options.set_subpixel_order(cairo.SUBPIXEL_ORDER_DEFAULT)
options.set_hint_metrics(cairo.HINT_METRICS_DEFAULT)
self._screen.set_font_options(options)
self._temp_win = gtk.Window()
self._temp_view = widgets._WebView()
settings = self._temp_view.get_settings()
settings.props.default_font_family = 'DejaVu LGC Serif'
settings.props.sans_serif_font_family = 'DejaVu LGC Sans'
settings.props.serif_font_family = 'DejaVu LGC Serif'
settings.props.monospace_font_family = 'DejaVu LGC Sans Mono'
settings.props.enforce_96_dpi = True
#FIXME: This does not seem to work
#settings.props.auto_shrink_images = False
settings.props.enable_plugins = False
settings.props.default_font_size = 12
settings.props.default_monospace_font_size = 10
settings.props.default_encoding = 'utf-8'
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_NEVER)
self._dpi = 96
sw.set_size_request(_mm_to_pixel(PAGE_WIDTH, self._dpi),
_mm_to_pixel(PAGE_HEIGHT, self._dpi))
sw.add(self._temp_view)
self._temp_win.add(sw)
self._temp_view.connect('load-finished', self._page_load_finished_cb)
self._temp_win.show_all()
self._temp_win.unmap()
self._temp_view.open(self._filelist[self._count])
def _page_load_finished_cb(self, v, frame):
f = v.get_main_frame()
pageheight = v.get_page_height()
if pageheight <= _mm_to_pixel(PAGE_HEIGHT, self._dpi):
pages = 1
else:
pages = pageheight / float(_mm_to_pixel(PAGE_HEIGHT, self._dpi))
for i in range(1, int(math.ceil(pages) + 1)):
if pages - i < 0:
pagelen = (pages - math.floor(pages)) / pages
else:
pagelen = 1 / pages
self._pagemap[float(self._pagecount + i)] = \
(f.props.uri, (i - 1) / math.ceil(pages), pagelen)
self._pagecount += int(math.ceil(pages))
self._filedict[f.props.uri.replace('file://', '')] = \
(math.ceil(pages), math.ceil(pages) - pages)
self._bookheight += pageheight
if self._count + 1 >= len(self._filelist):
self._temp_win.destroy()
self._screen.set_font_options(self._old_fontoptions)
self.emit('paginated')
else:
self._count += 1
self._temp_view.open(self._filelist[self._count])
def get_file_for_pageno(self, pageno):
'''
Returns the file in which pageno occurs
'''
return self._pagemap[pageno][0]
def get_scrollfactor_pos_for_pageno(self, pageno):
'''
Returns the position scrollfactor (fraction) for pageno
'''
return self._pagemap[pageno][1]
def get_scrollfactor_len_for_pageno(self, pageno):
'''
Returns the length scrollfactor (fraction) for pageno
'''
return self._pagemap[pageno][2]
def get_pagecount_for_file(self, filename):
'''
Returns the number of pages in file
'''
return self._filedict[filename][0]
def get_base_pageno_for_file(self, filename):
'''
Returns the pageno which begins in filename
'''
for key in self._pagemap.keys():
if self._pagemap[key][0].replace('file://', '') == filename:
return key
return None
def get_remfactor_for_file(self, filename):
'''
Returns the remainder
factor (1 - fraction length of last page in file)
'''
return self._filedict[filename][1]
def get_total_pagecount(self):
'''
Returns the total pagecount for the Epub file
'''
return self._pagecount
def get_total_height(self):
'''
Returns the total height of the Epub in pixels
'''
return self._bookheight
class _JobFind(gobject.GObject):
__gsignals__ = {
'updated': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ([])),
}
def __init__(self, document, start_page, n_pages, text,
case_sensitive=False):
gobject.GObject.__init__(self)
gtk.gdk.threads_init()
self._finished = False
self._document = document
self._start_page = start_page
self._n_pages = n_pages
self._text = text
self._case_sensitive = case_sensitive
self.flattoc = self._document.get_flattoc()
self._matchfilelist = []
self._current_file_index = 0
self.threads = []
s_thread = SearchThread(self)
self.threads.append(s_thread)
s_thread.start()
def cancel(self):
'''
Cancels the search job
'''
for s_thread in self.threads:
s_thread.stop()
def is_finished(self):
'''
Returns True if the entire search job has been finished
'''
return self._finished
def get_next_file(self):
'''
Returns the next file which has the search pattern
'''
self._current_file_index += 1
try:
path = self._matchfilelist[self._current_file_index]
except IndexError:
self._current_file_index = 0
path = self._matchfilelist[self._current_file_index]
return path
def get_prev_file(self):
'''
Returns the previous file which has the search pattern
'''
self._current_file_index -= 1
try:
path = self._matchfilelist[self._current_file_index]
except IndexError:
self._current_file_index = -1
path = self._matchfilelist[self._current_file_index]
return path
def get_search_text(self):
'''
Returns the search text
'''
return self._text
def get_case_sensitive(self):
'''
Returns True if the search is case-sensitive
'''
return self._case_sensitive
|
jelugbo/ddi
|
refs/heads/master
|
lms/djangoapps/courseware/management/commands/tests/__init__.py
|
12133432
| |
haoxli/crosswalk-test-suite
|
refs/heads/master
|
tools/allpairs-plus/metacomm/combinatorics/__init__.py
|
12133432
| |
rd37/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/domains/__init__.py
|
12133432
| |
sbktechnology/sap_frappe
|
refs/heads/master
|
frappe/integrations/__init__.py
|
12133432
| |
alazyer/oscar
|
refs/heads/master
|
frobshop/oscar/management/commands/oscar_send_alerts.py
|
43
|
import logging
from django.utils.translation import ugettext_lazy as _
from django.core.management.base import BaseCommand
from oscar.apps.customer.alerts import utils
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Check stock records of products for availability and send out alerts
to customers that have registered for an alert.
"""
help = _("Check for products that are back in "
"stock and send out alerts")
def handle(self, **options):
"""
Check all products with active product alerts for
availability and send out email alerts when a product is
available to buy.
"""
utils.send_alerts()
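# Hedged usage note: like other management commands this is normally run
# on a schedule (e.g. cron) as "python manage.py oscar_send_alerts".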
|
vipul-sharma20/oh-mainline
|
refs/heads/master
|
mysite/profile/migrations/0015_asheesh_make_project_name_unique.py
|
17
|
# This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
class Migration:
def forwards(self, orm):
# Changing field 'Person.time_record_was_created'
db.alter_column('profile_person', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 21, 12, 45, 31, 878127)))
# Changing field 'Link_ProjectExp_Tag.time_record_was_created'
db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 21, 12, 45, 32, 159881)))
# Changing field 'Link_Project_Tag.time_record_was_created'
db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 21, 12, 45, 32, 215414)))
def backwards(self, orm):
# Changing field 'Person.time_record_was_created'
db.alter_column('profile_person', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 20, 14, 24, 45, 875695)))
# Changing field 'Link_ProjectExp_Tag.time_record_was_created'
db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 20, 14, 24, 45, 984494)))
# Changing field 'Link_Project_Tag.time_record_was_created'
db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 20, 14, 24, 46, 134691)))
models = {
'profile.person': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'interested_in_working_on': ('models.CharField', [], {'default': "''", 'max_length': '1024'}),
'last_polled': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_touched': ('models.DateTimeField', [], {'null': 'True'}),
'name': ('models.CharField', [], {'max_length': '200'}),
'password_hash_md5': ('models.CharField', [], {'max_length': '200'}),
'poll_on_next_web_view': ('models.BooleanField', [], {'default': 'True'}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 21, 12, 45, 32, 593402)'}),
'username': ('models.CharField', [], {'max_length': '200'})
},
'profile.tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('models.ForeignKey', ["orm['profile.TagType']"], {}),
'text': ('models.CharField', [], {'max_length': '50'})
},
'profile.link_projectexp_tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'project_exp': ('models.ForeignKey', ["orm['profile.ProjectExp']"], {}),
'source': ('models.CharField', [], {'max_length': '200'}),
'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 21, 12, 45, 32, 288061)'})
},
'search.project': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'profile.link_project_tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
'source': ('models.CharField', [], {'max_length': '200'}),
'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 21, 12, 45, 32, 536696)'})
},
'profile.tagtype': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'name': ('models.CharField', [], {'max_length': '100'}),
'prefix': ('models.CharField', [], {'max_length': '20'})
},
'profile.projectexp': {
'description': ('models.TextField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'last_touched': ('models.DateTimeField', [], {'null': 'True'}),
'man_months': ('models.PositiveIntegerField', [], {'null': 'True'}),
'person': ('models.ForeignKey', ["orm['profile.Person']"], {}),
'person_role': ('models.CharField', [], {'max_length': '200'}),
'primary_language': ('models.CharField', [], {'max_length': '200', 'null': 'True'}),
'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
'source': ('models.CharField', [], {'max_length': '100', 'null': 'True'}),
'time_record_was_created': ('models.DateTimeField', [], {'null': 'True'}),
'url': ('models.URLField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['profile']
|
leansoft/edx-platform
|
refs/heads/master
|
lms/djangoapps/certificates/migrations/0010_auto__del_field_generatedcertificate_enabled__add_field_generatedcerti.py
|
188
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'GeneratedCertificate.enabled'
db.delete_column('certificates_generatedcertificate', 'enabled')
# Adding field 'GeneratedCertificate.status'
db.add_column('certificates_generatedcertificate', 'status',
self.gf('django.db.models.fields.CharField')(default='unavailable', max_length=32),
keep_default=False)
def backwards(self, orm):
# Adding field 'GeneratedCertificate.enabled'
db.add_column('certificates_generatedcertificate', 'enabled',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'GeneratedCertificate.status'
db.delete_column('certificates_generatedcertificate', 'status')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
Tejal011089/huntercamp_erpnext
|
refs/heads/develop
|
erpnext/projects/doctype/time_log_batch_detail/time_log_batch_detail.py
|
65
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class TimeLogBatchDetail(Document):
pass
|
idncom/odoo
|
refs/heads/8.0
|
addons/l10n_no/__init__.py
|
693
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
netgroup-polito/dpdk
|
refs/heads/directvm2vm_new
|
tools/dpdk_nic_bind.py
|
9
|
#! /usr/bin/python
#
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys, os, getopt, subprocess
from os.path import exists, abspath, dirname, basename
# The PCI device class for ETHERNET devices
ETHERNET_CLASS = "0200"
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
devices = {}
# list of supported DPDK drivers
dpdk_drivers = [ "igb_uio", "vfio-pci", "uio_pci_generic" ]
# command-line arg flags
b_flag = None
status_flag = False
force_flag = False
args = []
def usage():
'''Print usage information for the program'''
argv0 = basename(sys.argv[0])
print """
Usage:
------
%(argv0)s [options] DEVICE1 DEVICE2 ....
where DEVICE1, DEVICE2 etc, are specified via PCI "domain:bus:slot.func" syntax
or "bus:slot.func" syntax. For devices bound to Linux kernel drivers, they may
also be referred to by Linux interface name e.g. eth0, eth1, em0, em1, etc.
Options:
--help, --usage:
Display usage information and quit
-s, --status:
Print the current status of all known network interfaces.
For each device, it displays the PCI domain, bus, slot and function,
along with a text description of the device. Depending upon whether the
device is being used by a kernel driver, the igb_uio driver, or no
driver, other relevant information will be displayed:
* the Linux interface name e.g. if=eth0
* the driver being used e.g. drv=igb_uio
* any suitable drivers not currently using that device
e.g. unused=igb_uio
NOTE: if this flag is passed along with a bind/unbind option, the status
display will always occur after the other operations have taken place.
-b driver, --bind=driver:
Select the driver to use or \"none\" to unbind the device
-u, --unbind:
Unbind a device (Equivalent to \"-b none\")
--force:
By default, devices which are used by Linux - as indicated by having
routes in the routing table - cannot be modified. Using the --force
flag overrides this behavior, allowing active links to be forcibly
unbound.
WARNING: This can lead to loss of network connection and should be used
with caution.
Examples:
---------
To display current device status:
%(argv0)s --status
To bind eth1 from the current driver and move to use igb_uio
%(argv0)s --bind=igb_uio eth1
To unbind 0000:01:00.0 from using any driver
%(argv0)s -u 0000:01:00.0
To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
%(argv0)s -b ixgbe 02:00.0 02:00.1
""" % locals() # replace items from local variables
# This is roughly compatible with check_output function in subprocess module
# which is only available in python 2.7.
def check_output(args, stderr=None):
'''Run a command and capture its output'''
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=stderr).communicate()[0]
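# Hedged note: unlike subprocess.check_output (Python 2.7+), the helper
# above does not raise on a non-zero exit status; callers inspect the
# captured text instead, e.g. out = check_output(["lspci", "-Dvmmn"]).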
def find_module(mod):
'''find the .ko file for kernel module named mod.
Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
modules directory and finally under the parent directory of
the script '''
# check $RTE_SDK/$RTE_TARGET directory
if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],\
os.environ['RTE_TARGET'], mod)
if exists(path):
return path
# check using depmod
try:
depmod_out = check_output(["modinfo", "-n", mod], \
stderr=subprocess.STDOUT).lower()
if "error" not in depmod_out:
path = depmod_out.strip()
if exists(path):
return path
except: # if modinfo can't find module, it fails, so continue
pass
# check for a copy based off current path
tools_dir = dirname(abspath(sys.argv[0]))
if (tools_dir.endswith("tools")):
base_dir = dirname(tools_dir)
find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
if len(find_out) > 0: #something matched
path = find_out.splitlines()[0]
if exists(path):
return path
def check_modules():
'''Checks that igb_uio is loaded'''
global dpdk_drivers
fd = file("/proc/modules")
loaded_mods = fd.readlines()
fd.close()
# list of supported modules
mods = [{"Name" : driver, "Found" : False} for driver in dpdk_drivers]
# first check if module is loaded
for line in loaded_mods:
for mod in mods:
if line.startswith(mod["Name"]):
mod["Found"] = True
# special case for vfio_pci (module is named vfio-pci,
# but its .ko is named vfio_pci)
elif line.replace("_", "-").startswith(mod["Name"]):
mod["Found"] = True
# check if we have at least one loaded module
if True not in [mod["Found"] for mod in mods] and b_flag is not None:
if b_flag in dpdk_drivers:
print "Error - no supported modules(DPDK driver) are loaded"
sys.exit(1)
else:
print "Warning - no supported modules(DPDK driver) are loaded"
# change DPDK driver list to only contain drivers that are loaded
dpdk_drivers = [mod["Name"] for mod in mods if mod["Found"]]
def has_driver(dev_id):
'''return true if a device is assigned to a driver. False otherwise'''
return "Driver_str" in devices[dev_id]
def get_pci_device_details(dev_id):
'''This function gets additional details for a PCI device'''
device = {}
extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
# parse lspci details
for line in extra_info:
if len(line) == 0:
continue
name, value = line.split("\t", 1)
name = name.strip(":") + "_str"
device[name] = value
# check for a unix interface name
sys_path = "/sys/bus/pci/devices/%s/net/" % dev_id
if exists(sys_path):
device["Interface"] = ",".join(os.listdir(sys_path))
else:
device["Interface"] = ""
# check if a port is used for ssh connection
device["Ssh_if"] = False
device["Active"] = ""
return device
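# Hedged illustration: "lspci -vmmks 0000:01:00.0" emits "Name:\tValue"
# pairs such as "Driver:\tixgbe", which the loop above stores as
# device["Driver_str"] = "ixgbe".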
def get_nic_details():
'''This function populates the "devices" dictionary. The keys used are
the pci addresses (domain:bus:slot.func). The values are themselves
dictionaries - one for each NIC.'''
global devices
global dpdk_drivers
# clear any old data
devices = {}
# first loop through and read details for all devices
# request machine readable format, with numeric IDs
    dev = {}
dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
for dev_line in dev_lines:
if (len(dev_line) == 0):
if dev["Class"] == ETHERNET_CLASS:
#convert device and vendor ids to numbers, then add to global
dev["Vendor"] = int(dev["Vendor"],16)
dev["Device"] = int(dev["Device"],16)
devices[dev["Slot"]] = dict(dev) # use dict to make copy of dev
else:
name, value = dev_line.split("\t", 1)
dev[name.rstrip(":")] = value
# check what is the interface if any for an ssh connection if
# any to this host, so we can mark it later.
ssh_if = []
route = check_output(["ip", "-o", "route"])
# filter out all lines for 169.254 routes
route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
route.splitlines()))
rt_info = route.split()
for i in xrange(len(rt_info) - 1):
if rt_info[i] == "dev":
ssh_if.append(rt_info[i+1])
# based on the basic info, get extended text details
for d in devices.keys():
# get additional info and add it to existing data
devices[d] = dict(devices[d].items() +
get_pci_device_details(d).items())
for _if in ssh_if:
if _if in devices[d]["Interface"].split(","):
devices[d]["Ssh_if"] = True
devices[d]["Active"] = "*Active*"
                break
# add igb_uio to list of supporting modules if needed
if "Module_str" in devices[d]:
for driver in dpdk_drivers:
if driver not in devices[d]["Module_str"]:
devices[d]["Module_str"] = devices[d]["Module_str"] + ",%s" % driver
else:
devices[d]["Module_str"] = ",".join(dpdk_drivers)
# make sure the driver and module strings do not have any duplicates
if has_driver(d):
modules = devices[d]["Module_str"].split(",")
if devices[d]["Driver_str"] in modules:
modules.remove(devices[d]["Driver_str"])
devices[d]["Module_str"] = ",".join(modules)
def dev_id_from_dev_name(dev_name):
'''Take a device "name" - a string passed in by user to identify a NIC
device, and determine the device id - i.e. the domain:bus:slot.func - for
it, which can then be used to index into the devices array'''
dev = None
# check if it's already a suitable index
if dev_name in devices:
return dev_name
# check if it's an index just missing the domain part
elif "0000:" + dev_name in devices:
return "0000:" + dev_name
else:
# check if it's an interface name, e.g. eth1
for d in devices.keys():
if dev_name in devices[d]["Interface"].split(","):
return devices[d]["Slot"]
# if nothing else matches - error
print "Unknown device: %s. " \
"Please specify device in \"bus:slot.func\" format" % dev_name
sys.exit(1)
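# Hedged examples of names accepted by the lookup above: a full PCI
# address "0000:01:00.0", the short form "01:00.0" (domain assumed to be
# 0000), or a kernel interface name such as "eth1".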
def unbind_one(dev_id, force):
'''Unbind the device identified by "dev_id" from its current driver'''
dev = devices[dev_id]
if not has_driver(dev_id):
print "%s %s %s is not currently managed by any driver\n" % \
(dev["Slot"], dev["Device_str"], dev["Interface"])
return
# prevent us disconnecting ourselves
if dev["Ssh_if"] and not force:
print "Routing table indicates that interface %s is active" \
". Skipping unbind" % (dev_id)
return
# write to /sys to unbind
filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
try:
f = open(filename, "a")
except:
print "Error: unbind failed for %s - Cannot open %s" % (dev_id, filename)
        sys.exit(1)
f.write(dev_id)
f.close()
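# Hedged illustration: the unbind above mirrors the shell idiom
#   echo 0000:01:00.0 > /sys/bus/pci/drivers/<driver>/unbind
# and bind_one below performs the matching writes to .../new_id (for the
# DPDK drivers) and .../bind.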
def bind_one(dev_id, driver, force):
'''Bind the device given by "dev_id" to the driver "driver". If the device
is already bound to a different driver, it will be unbound first'''
dev = devices[dev_id]
saved_driver = None # used to rollback any unbind in case of failure
# prevent disconnection of our ssh session
if dev["Ssh_if"] and not force:
print "Routing table indicates that interface %s is active" \
". Not modifying" % (dev_id)
return
# unbind any existing drivers we don't want
if has_driver(dev_id):
if dev["Driver_str"] == driver:
print "%s already bound to driver %s, skipping\n" % (dev_id, driver)
return
else:
saved_driver = dev["Driver_str"]
unbind_one(dev_id, force)
dev["Driver_str"] = "" # clear driver string
# if we are binding to one of DPDK drivers, add PCI id's to that driver
if driver in dpdk_drivers:
filename = "/sys/bus/pci/drivers/%s/new_id" % driver
try:
f = open(filename, "w")
except:
print "Error: bind failed for %s - Cannot open %s" % (dev_id, filename)
return
try:
f.write("%04x %04x" % (dev["Vendor"], dev["Device"]))
f.close()
except:
print "Error: bind failed for %s - Cannot write new PCI ID to " \
"driver %s" % (dev_id, driver)
return
# do the bind by writing to /sys
filename = "/sys/bus/pci/drivers/%s/bind" % driver
try:
f = open(filename, "a")
except:
print "Error: bind failed for %s - Cannot open %s" % (dev_id, filename)
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
try:
f.write(dev_id)
f.close()
except:
# for some reason, closing dev_id after adding a new PCI ID to new_id
# results in IOError. however, if the device was successfully bound,
# we don't care for any errors and can safely ignore IOError
tmp = get_pci_device_details(dev_id)
if "Driver_str" in tmp and tmp["Driver_str"] == driver:
return
print "Error: bind failed for %s - Cannot bind to driver %s" % (dev_id, driver)
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
def unbind_all(dev_list, force=False):
"""Unbind method, takes a list of device locations"""
dev_list = map(dev_id_from_dev_name, dev_list)
for d in dev_list:
unbind_one(d, force)
def bind_all(dev_list, driver, force=False):
"""Bind method, takes a list of device locations"""
global devices
dev_list = map(dev_id_from_dev_name, dev_list)
for d in dev_list:
bind_one(d, driver, force)
# when binding devices to a generic driver (i.e. one that doesn't have a
# PCI ID table), some devices that are not bound to any other driver could
# be bound even if no one has asked them to. hence, we check the list of
# drivers again, and see if some of the previously-unbound devices were
# erroneously bound.
for d in devices.keys():
# skip devices that were already bound or that we know should be bound
if "Driver_str" in devices[d] or d in dev_list:
continue
# update information about this device
devices[d] = dict(devices[d].items() +
get_pci_device_details(d).items())
# check if updated information indicates that the device was bound
if "Driver_str" in devices[d]:
unbind_one(d, force)
def display_devices(title, dev_list, extra_params = None):
'''Displays to the user the details of a list of devices given in "dev_list"
The "extra_params" parameter, if given, should contain a string with
%()s fields in it for replacement by the named fields in each device's
dictionary.'''
strings = [] # this holds the strings to print. We sort before printing
print "\n%s" % title
print "="*len(title)
if len(dev_list) == 0:
strings.append("<none>")
else:
for dev in dev_list:
if extra_params is not None:
strings.append("%s '%s' %s" % (dev["Slot"], \
dev["Device_str"], extra_params % dev))
else:
strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
# sort before printing, so that the entries appear in PCI order
strings.sort()
print "\n".join(strings) # print one per line
def show_status():
'''Function called when the script is passed the "--status" option. Displays
to the user what devices are bound to the igb_uio driver, the kernel driver
or to no driver'''
global dpdk_drivers
kernel_drv = []
dpdk_drv = []
no_drv = []
# split our list of devices into the three categories above
for d in devices.keys():
if not has_driver(d):
no_drv.append(devices[d])
continue
if devices[d]["Driver_str"] in dpdk_drivers:
dpdk_drv.append(devices[d])
else:
kernel_drv.append(devices[d])
# print each category separately, so we can clearly see what's used by DPDK
display_devices("Network devices using DPDK-compatible driver", dpdk_drv, \
"drv=%(Driver_str)s unused=%(Module_str)s")
display_devices("Network devices using kernel driver", kernel_drv,
"if=%(Interface)s drv=%(Driver_str)s unused=%(Module_str)s %(Active)s")
display_devices("Other network devices", no_drv,\
"unused=%(Module_str)s")
def parse_args():
'''Parses the command-line arguments given by the user and takes the
appropriate action for each'''
global b_flag
global status_flag
global force_flag
global args
if len(sys.argv) <= 1:
usage()
sys.exit(0)
try:
opts, args = getopt.getopt(sys.argv[1:], "b:us",
["help", "usage", "status", "force",
"bind=", "unbind"])
except getopt.GetoptError, error:
print str(error)
print "Run '%s --usage' for further information" % sys.argv[0]
sys.exit(1)
for opt, arg in opts:
if opt == "--help" or opt == "--usage":
usage()
sys.exit(0)
if opt == "--status" or opt == "-s":
status_flag = True
if opt == "--force":
force_flag = True
if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
if b_flag is not None:
print "Error - Only one bind or unbind may be specified\n"
sys.exit(1)
if opt == "-u" or opt == "--unbind":
b_flag = "none"
else:
b_flag = arg
def do_arg_actions():
'''do the actual action requested by the user'''
global b_flag
global status_flag
global force_flag
global args
if b_flag is None and not status_flag:
print "Error: No action specified for devices. Please give a -b or -u option"
print "Run '%s --usage' for further information" % sys.argv[0]
sys.exit(1)
if b_flag is not None and len(args) == 0:
print "Error: No devices specified."
print "Run '%s --usage' for further information" % sys.argv[0]
sys.exit(1)
if b_flag == "none" or b_flag == "None":
unbind_all(args, force_flag)
elif b_flag is not None:
bind_all(args, b_flag, force_flag)
if status_flag:
if b_flag is not None:
get_nic_details() # refresh if we have changed anything
show_status()
def main():
'''program main function'''
parse_args()
check_modules()
get_nic_details()
do_arg_actions()
if __name__ == "__main__":
main()
|
ecosoft-odoo/odoo
|
refs/heads/8.0
|
addons/warning/__openerp__.py
|
261
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Warning Messages and Alerts',
'version': '1.0',
'category': 'Tools',
'description': """
Module to trigger warnings in OpenERP objects.
==============================================
Warning messages can be displayed for objects like sale order, purchase order,
picking and invoice. The message is triggered by the form's onchange event.
""",
'author': 'OpenERP SA',
'depends': ['base', 'sale_stock', 'purchase'],
'data': ['warning_view.xml'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
snahelou/awx
|
refs/heads/devel
|
awx/lib/isolated_callbacks/minimal.py
|
2
|
# Copyright (c) 2017 Ansible by Red Hat
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
# Python
import os
import sys
# Add awx/lib to sys.path.
awx_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if awx_lib_path not in sys.path:
sys.path.insert(0, awx_lib_path)
# Tower Display Callback
from awx_display_callback import AWXMinimalCallbackModule as CallbackModule # noqa
|
c0710204/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/split_mongo/__init__.py
|
6
|
"""
General utilities
"""
from collections import namedtuple
from contracts import contract, check
from opaque_keys.edx.locator import BlockUsageLocator
class BlockKey(namedtuple('BlockKey', 'type id')):
__slots__ = ()
@contract(type="string[>0]")
def __new__(cls, type, id):
return super(BlockKey, cls).__new__(cls, type, id)
@classmethod
@contract(usage_key=BlockUsageLocator)
def from_usage_key(cls, usage_key):
return cls(usage_key.block_type, usage_key.block_id)
CourseEnvelope = namedtuple('CourseEnvelope', 'course_key structure')
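# Hedged illustration: BlockKey("problem", "abc123") and
# BlockKey.from_usage_key(some_usage_key) both produce a hashable
# (type, id) namedtuple; some_usage_key here stands for an assumed
# BlockUsageLocator instance.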
|
orangeduck/PyAutoC
|
refs/heads/master
|
Python27/Lib/msilib/__init__.py
|
43
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2005 Martin v. Löwis
# Licensed to PSF under a Contributor Agreement.
from _msi import *
import os, string, re, sys
AMD64 = "AMD64" in sys.version
Itanium = "Itanium" in sys.version
Win64 = AMD64 or Itanium
# Partially taken from Wine
datasizemask= 0x00ff
type_valid= 0x0100
type_localizable= 0x0200
typemask= 0x0c00
type_long= 0x0000
type_short= 0x0400
type_string= 0x0c00
type_binary= 0x0800
type_nullable= 0x1000
type_key= 0x2000
# XXX temporary, localizable?
knownbits = datasizemask | type_valid | type_localizable | \
typemask | type_nullable | type_key
class Table:
def __init__(self, name):
self.name = name
self.fields = []
def add_field(self, index, name, type):
self.fields.append((index,name,type))
def sql(self):
        keys = []
        self.fields.sort()
        fields = [None]*len(self.fields)
for index, name, type in self.fields:
index -= 1
unk = type & ~knownbits
if unk:
print "%s.%s unknown bits %x" % (self.name, name, unk)
size = type & datasizemask
dtype = type & typemask
if dtype == type_string:
if size:
tname="CHAR(%d)" % size
else:
tname="CHAR"
elif dtype == type_short:
assert size==2
tname = "SHORT"
elif dtype == type_long:
assert size==4
tname="LONG"
elif dtype == type_binary:
assert size==0
tname="OBJECT"
else:
tname="unknown"
print "%s.%sunknown integer type %d" % (self.name, name, size)
if type & type_nullable:
flags = ""
else:
flags = " NOT NULL"
if type & type_localizable:
flags += " LOCALIZABLE"
fields[index] = "`%s` %s%s" % (name, tname, flags)
if type & type_key:
keys.append("`%s`" % name)
fields = ", ".join(fields)
keys = ", ".join(keys)
return "CREATE TABLE %s (%s PRIMARY KEY %s)" % (self.name, fields, keys)
def create(self, db):
v = db.OpenView(self.sql())
v.Execute(None)
v.Close()
class _Unspecified:pass
def change_sequence(seq, action, seqno=_Unspecified, cond = _Unspecified):
"Change the sequence number of an action in a sequence list"
for i in range(len(seq)):
if seq[i][0] == action:
if cond is _Unspecified:
cond = seq[i][1]
if seqno is _Unspecified:
seqno = seq[i][2]
seq[i] = (action, cond, seqno)
return
raise ValueError, "Action not found in sequence"
def add_data(db, table, values):
v = db.OpenView("SELECT * FROM `%s`" % table)
count = v.GetColumnInfo(MSICOLINFO_NAMES).GetFieldCount()
r = CreateRecord(count)
for value in values:
assert len(value) == count, value
for i in range(count):
field = value[i]
if isinstance(field, (int, long)):
r.SetInteger(i+1,field)
elif isinstance(field, basestring):
r.SetString(i+1,field)
elif field is None:
pass
elif isinstance(field, Binary):
r.SetStream(i+1, field.name)
else:
raise TypeError, "Unsupported type %s" % field.__class__.__name__
try:
v.Modify(MSIMODIFY_INSERT, r)
except Exception, e:
raise MSIError("Could not insert "+repr(values)+" into "+table)
r.ClearData()
v.Close()
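# Hedged usage sketch: each tuple in "values" must match the table's
# column order and count, e.g.
#   add_data(db, "Property", [("ProductName", "Demo"),
#                             ("ProductVersion", "1.0")])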
def add_stream(db, name, path):
v = db.OpenView("INSERT INTO _Streams (Name, Data) VALUES ('%s', ?)" % name)
r = CreateRecord(1)
r.SetStream(1, path)
v.Execute(r)
v.Close()
def init_database(name, schema,
ProductName, ProductCode, ProductVersion,
Manufacturer):
try:
os.unlink(name)
except OSError:
pass
ProductCode = ProductCode.upper()
# Create the database
db = OpenDatabase(name, MSIDBOPEN_CREATE)
# Create the tables
for t in schema.tables:
t.create(db)
# Fill the validation table
add_data(db, "_Validation", schema._Validation_records)
# Initialize the summary information, allowing atmost 20 properties
si = db.GetSummaryInformation(20)
si.SetProperty(PID_TITLE, "Installation Database")
si.SetProperty(PID_SUBJECT, ProductName)
si.SetProperty(PID_AUTHOR, Manufacturer)
if Itanium:
si.SetProperty(PID_TEMPLATE, "Intel64;1033")
elif AMD64:
si.SetProperty(PID_TEMPLATE, "x64;1033")
else:
si.SetProperty(PID_TEMPLATE, "Intel;1033")
si.SetProperty(PID_REVNUMBER, gen_uuid())
si.SetProperty(PID_WORDCOUNT, 2) # long file names, compressed, original media
si.SetProperty(PID_PAGECOUNT, 200)
si.SetProperty(PID_APPNAME, "Python MSI Library")
# XXX more properties
si.Persist()
add_data(db, "Property", [
("ProductName", ProductName),
("ProductCode", ProductCode),
("ProductVersion", ProductVersion),
("Manufacturer", Manufacturer),
("ProductLanguage", "1033")])
db.Commit()
return db
def add_tables(db, module):
for table in module.tables:
add_data(db, table, getattr(module, table))
def make_id(str):
identifier_chars = string.ascii_letters + string.digits + "._"
str = "".join([c if c in identifier_chars else "_" for c in str])
if str[0] in (string.digits + "."):
str = "_" + str
assert re.match("^[A-Za-z_][A-Za-z0-9_.]*$", str), "FILE"+str
return str
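# Hedged examples of the sanitizing above:
#   make_id("my file.txt") -> "my_file.txt"
#   make_id("1stfile")     -> "_1stfile"  (leading digit gets a prefix)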
def gen_uuid():
return "{"+UuidCreate().upper()+"}"
class CAB:
def __init__(self, name):
self.name = name
self.files = []
self.filenames = set()
self.index = 0
def gen_id(self, file):
logical = _logical = make_id(file)
pos = 1
while logical in self.filenames:
logical = "%s.%d" % (_logical, pos)
pos += 1
self.filenames.add(logical)
return logical
def append(self, full, file, logical):
if os.path.isdir(full):
return
if not logical:
logical = self.gen_id(file)
self.index += 1
self.files.append((full, logical))
return self.index, logical
def commit(self, db):
from tempfile import mktemp
filename = mktemp()
FCICreate(filename, self.files)
add_data(db, "Media",
[(1, self.index, None, "#"+self.name, None, None)])
add_stream(db, self.name, filename)
os.unlink(filename)
db.Commit()
_directories = set()
class Directory:
def __init__(self, db, cab, basedir, physical, _logical, default, componentflags=None):
"""Create a new directory in the Directory table. There is a current component
at each point in time for the directory, which is either explicitly created
through start_component, or implicitly when files are added for the first
time. Files are added into the current component, and into the cab file.
To create a directory, a base directory object needs to be specified (can be
None), the path to the physical directory, and a logical directory name.
Default specifies the DefaultDir slot in the directory table. componentflags
specifies the default flags that new components get."""
index = 1
_logical = make_id(_logical)
logical = _logical
while logical in _directories:
logical = "%s%d" % (_logical, index)
index += 1
_directories.add(logical)
self.db = db
self.cab = cab
self.basedir = basedir
self.physical = physical
self.logical = logical
self.component = None
self.short_names = set()
self.ids = set()
self.keyfiles = {}
self.componentflags = componentflags
if basedir:
self.absolute = os.path.join(basedir.absolute, physical)
blogical = basedir.logical
else:
self.absolute = physical
blogical = None
add_data(db, "Directory", [(logical, blogical, default)])
def start_component(self, component = None, feature = None, flags = None, keyfile = None, uuid=None):
"""Add an entry to the Component table, and make this component the current for this
directory. If no component name is given, the directory name is used. If no feature
is given, the current feature is used. If no flags are given, the directory's default
flags are used. If no keyfile is given, the KeyPath is left null in the Component
table."""
if flags is None:
flags = self.componentflags
if uuid is None:
uuid = gen_uuid()
else:
uuid = uuid.upper()
if component is None:
component = self.logical
self.component = component
if Win64:
flags |= 256
if keyfile:
            keyid = self.cab.gen_id(keyfile)
self.keyfiles[keyfile] = keyid
else:
keyid = None
add_data(self.db, "Component",
[(component, uuid, self.logical, flags, None, keyid)])
if feature is None:
feature = current_feature
add_data(self.db, "FeatureComponents",
[(feature.id, component)])
def make_short(self, file):
oldfile = file
file = file.replace('+', '_')
        file = ''.join(c for c in file if not c in r' "/\[]:;=,')
parts = file.split(".")
if len(parts) > 1:
prefix = "".join(parts[:-1]).upper()
suffix = parts[-1].upper()
if not prefix:
prefix = suffix
suffix = None
else:
prefix = file.upper()
suffix = None
if len(parts) < 3 and len(prefix) <= 8 and file == oldfile and (
not suffix or len(suffix) <= 3):
if suffix:
file = prefix+"."+suffix
else:
file = prefix
else:
file = None
if file is None or file in self.short_names:
prefix = prefix[:6]
if suffix:
suffix = suffix[:3]
pos = 1
while 1:
if suffix:
file = "%s~%d.%s" % (prefix, pos, suffix)
else:
file = "%s~%d" % (prefix, pos)
if file not in self.short_names: break
pos += 1
assert pos < 10000
if pos in (10, 100, 1000):
prefix = prefix[:-1]
self.short_names.add(file)
assert not re.search(r'[\?|><:/*"+,;=\[\]]', file) # restrictions on short names
return file
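    # Illustrative mapping for make_short above (editor's note, not in the
    # original module): "README.TXT" already fits the 8.3 form and is returned
    # unchanged, while "longfilename.txt" becomes "LONGFI~1.TXT" -- a
    # six-character prefix, a "~<n>" disambiguator, and a three-character
    # suffix.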
def add_file(self, file, src=None, version=None, language=None):
"""Add a file to the current component of the directory, starting a new one
one if there is no current component. By default, the file name in the source
and the file table will be identical. If the src file is specified, it is
interpreted relative to the current directory. Optionally, a version and a
language can be specified for the entry in the File table."""
if not self.component:
self.start_component(self.logical, current_feature, 0)
if not src:
# Allow relative paths for file if src is not specified
src = file
file = os.path.basename(file)
absolute = os.path.join(self.absolute, src)
assert not re.search(r'[\?|><:/*]"', file) # restrictions on long names
if file in self.keyfiles:
logical = self.keyfiles[file]
else:
logical = None
sequence, logical = self.cab.append(absolute, file, logical)
assert logical not in self.ids
self.ids.add(logical)
short = self.make_short(file)
full = "%s|%s" % (short, file)
filesize = os.stat(absolute).st_size
# constants.msidbFileAttributesVital
# Compressed omitted, since it is the database default
# could add r/o, system, hidden
attributes = 512
add_data(self.db, "File",
[(logical, self.component, full, filesize, version,
language, attributes, sequence)])
#if not version:
# # Add hash if the file is not versioned
# filehash = FileHash(absolute, 0)
# add_data(self.db, "MsiFileHash",
# [(logical, 0, filehash.IntegerData(1),
# filehash.IntegerData(2), filehash.IntegerData(3),
# filehash.IntegerData(4))])
# Automatically remove .pyc/.pyo files on uninstall (2)
# XXX: adding so many RemoveFile entries makes installer unbelievably
# slow. So instead, we have to use wildcard remove entries
if file.endswith(".py"):
add_data(self.db, "RemoveFile",
[(logical+"c", self.component, "%sC|%sc" % (short, file),
self.logical, 2),
(logical+"o", self.component, "%sO|%so" % (short, file),
self.logical, 2)])
return logical
def glob(self, pattern, exclude = None):
"""Add a list of files to the current component as specified in the
glob pattern. Individual files can be excluded in the exclude list."""
files = glob.glob1(self.absolute, pattern)
for f in files:
if exclude and f in exclude: continue
self.add_file(f)
return files
def remove_pyc(self):
"Remove .pyc/.pyo files on uninstall"
add_data(self.db, "RemoveFile",
[(self.component+"c", self.component, "*.pyc", self.logical, 2),
(self.component+"o", self.component, "*.pyo", self.logical, 2)])
class Binary:
def __init__(self, fname):
self.name = fname
def __repr__(self):
return 'msilib.Binary(os.path.join(dirname,"%s"))' % self.name
class Feature:
def __init__(self, db, id, title, desc, display, level = 1,
parent=None, directory = None, attributes=0):
self.id = id
if parent:
parent = parent.id
add_data(db, "Feature",
[(id, parent, title, desc, display,
level, directory, attributes)])
def set_current(self):
global current_feature
current_feature = self
class Control:
def __init__(self, dlg, name):
self.dlg = dlg
self.name = name
def event(self, event, argument, condition = "1", ordering = None):
add_data(self.dlg.db, "ControlEvent",
[(self.dlg.name, self.name, event, argument,
condition, ordering)])
def mapping(self, event, attribute):
add_data(self.dlg.db, "EventMapping",
[(self.dlg.name, self.name, event, attribute)])
def condition(self, action, condition):
add_data(self.dlg.db, "ControlCondition",
[(self.dlg.name, self.name, action, condition)])
class RadioButtonGroup(Control):
def __init__(self, dlg, name, property):
self.dlg = dlg
self.name = name
self.property = property
self.index = 1
def add(self, name, x, y, w, h, text, value = None):
if value is None:
value = name
add_data(self.dlg.db, "RadioButton",
[(self.property, self.index, value,
x, y, w, h, text, None)])
self.index += 1
class Dialog:
def __init__(self, db, name, x, y, w, h, attr, title, first, default, cancel):
self.db = db
self.name = name
self.x, self.y, self.w, self.h = x,y,w,h
add_data(db, "Dialog", [(name, x,y,w,h,attr,title,first,default,cancel)])
def control(self, name, type, x, y, w, h, attr, prop, text, next, help):
add_data(self.db, "Control",
[(self.name, name, type, x, y, w, h, attr, prop, text, next, help)])
return Control(self, name)
def text(self, name, x, y, w, h, attr, text):
return self.control(name, "Text", x, y, w, h, attr, None,
text, None, None)
def bitmap(self, name, x, y, w, h, text):
return self.control(name, "Bitmap", x, y, w, h, 1, None, text, None, None)
def line(self, name, x, y, w, h):
return self.control(name, "Line", x, y, w, h, 1, None, None, None, None)
def pushbutton(self, name, x, y, w, h, attr, text, next):
return self.control(name, "PushButton", x, y, w, h, attr, None, text, next, None)
def radiogroup(self, name, x, y, w, h, attr, prop, text, next):
add_data(self.db, "Control",
[(self.name, name, "RadioButtonGroup",
x, y, w, h, attr, prop, text, next, None)])
return RadioButtonGroup(self, name, prop)
def checkbox(self, name, x, y, w, h, attr, prop, text, next):
return self.control(name, "CheckBox", x, y, w, h, attr, prop, text, next, None)
|
laayis/yowsup
|
refs/heads/master
|
yowsup/layers/protocol_groups/protocolentities/iq_groups_participants_remove.py
|
61
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq_groups_participants import ParticipantsGroupsIqProtocolEntity
class RemoveParticipantsIqProtocolEntity(ParticipantsGroupsIqProtocolEntity):
'''
<iq type="set" id="{{id}}" xmlns="w:g2", to="{{group_jid}}">
<remove>
<participant jid="{{jid}}"></participant>
<participant jid="{{jid}}"></participant>
</remove>
</iq>
'''
def __init__(self, group_jid, participantList, _id = None):
super(RemoveParticipantsIqProtocolEntity, self).__init__(group_jid, participantList, "remove", _id = _id)
@staticmethod
def fromProtocolTreeNode(node):
entity = super(RemoveParticipantsIqProtocolEntity, RemoveParticipantsIqProtocolEntity).fromProtocolTreeNode(node)
entity.__class__ = RemoveParticipantsIqProtocolEntity
participantList = []
for participantNode in node.getChild("remove").getAllChildren():
participantList.append(participantNode["jid"])
entity.setProps(node.getAttributeValue("to"), participantList)
return entity
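# Hedged usage sketch (editor's addition): constructing the entity above and
# serializing it back to a tree node; the JIDs below are placeholders.
#
#     entity = RemoveParticipantsIqProtocolEntity(
#         "123456789-1415000000@g.us", ["123456789@s.whatsapp.net"])
#     node = entity.toProtocolTreeNode()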
|
zhjunlang/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_importlib/import_/test___loader__.py
|
84
|
from importlib import machinery
import sys
import types
import unittest
from .. import util
from . import util as import_util
class SpecLoaderMock:
def find_spec(self, fullname, path=None, target=None):
return machinery.ModuleSpec(fullname, self)
def exec_module(self, module):
pass
class SpecLoaderAttributeTests:
def test___loader__(self):
loader = SpecLoaderMock()
with util.uncache('blah'), util.import_state(meta_path=[loader]):
module = self.__import__('blah')
self.assertEqual(loader, module.__loader__)
Frozen_SpecTests, Source_SpecTests = util.test_both(
SpecLoaderAttributeTests, __import__=import_util.__import__)
class LoaderMock:
def find_module(self, fullname, path=None):
return self
def load_module(self, fullname):
sys.modules[fullname] = self.module
return self.module
class LoaderAttributeTests:
def test___loader___missing(self):
module = types.ModuleType('blah')
try:
del module.__loader__
except AttributeError:
pass
loader = LoaderMock()
loader.module = module
with util.uncache('blah'), util.import_state(meta_path=[loader]):
module = self.__import__('blah')
self.assertEqual(loader, module.__loader__)
def test___loader___is_None(self):
module = types.ModuleType('blah')
module.__loader__ = None
loader = LoaderMock()
loader.module = module
with util.uncache('blah'), util.import_state(meta_path=[loader]):
returned_module = self.__import__('blah')
self.assertEqual(loader, module.__loader__)
Frozen_Tests, Source_Tests = util.test_both(LoaderAttributeTests,
__import__=import_util.__import__)
if __name__ == '__main__':
unittest.main()
|
Work4Labs/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/conf/locale/pl/__init__.py
|
12133432
| |
olasitarska/django
|
refs/heads/master
|
django/contrib/gis/tests/gis_migrations/__init__.py
|
12133432
| |
throwable-one/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/modeltests/test_client/__init__.py
|
12133432
| |
axsauze/eventsfinder
|
refs/heads/master
|
djangoappengine/mapreduce/__init__.py
|
12133432
| |
prakritish/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/module_utils/qux2/__init__.py
|
12133432
| |
tatouzri/twisted_texas_holdem
|
refs/heads/master
|
texas_holdem/message/registerUser.py
|
12133432
| |
navodissa/python-flask
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/__init__.py
|
12133432
| |
duhzecca/cinder
|
refs/heads/master
|
cinder/api/views/__init__.py
|
12133432
| |
ozburo/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/drbonanza.py
|
50
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
js_to_json,
parse_duration,
unescapeHTML,
)
class DRBonanzaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dr\.dk/bonanza/[^/]+/\d+/[^/]+/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
_TEST = {
'url': 'http://www.dr.dk/bonanza/serie/154/matador/40312/matador---0824-komme-fremmede-',
'info_dict': {
'id': '40312',
'display_id': 'matador---0824-komme-fremmede-',
'ext': 'mp4',
'title': 'MATADOR - 08:24. "Komme fremmede".',
'description': 'md5:77b4c1ac4d4c1b9d610ab4395212ff84',
'thumbnail': r're:^https?://.*\.(?:gif|jpg)$',
'duration': 4613,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id, display_id = mobj.group('id', 'display_id')
webpage = self._download_webpage(url, display_id)
info = self._parse_html5_media_entries(
url, webpage, display_id, m3u8_id='hls',
m3u8_entry_protocol='m3u8_native')[0]
self._sort_formats(info['formats'])
asset = self._parse_json(
self._search_regex(
r'(?s)currentAsset\s*=\s*({.+?})\s*</script', webpage, 'asset'),
display_id, transform_source=js_to_json)
title = unescapeHTML(asset['AssetTitle']).strip()
def extract(field):
return self._search_regex(
r'<div[^>]+>\s*<p>%s:<p>\s*</div>\s*<div[^>]+>\s*<p>([^<]+)</p>' % field,
webpage, field, default=None)
info.update({
'id': asset.get('AssetId') or video_id,
'display_id': display_id,
'title': title,
'description': extract('Programinfo'),
'duration': parse_duration(extract('Tid')),
'thumbnail': asset.get('AssetImageUrl'),
})
return info
|
MartinHjelmare/home-assistant
|
refs/heads/dev
|
tests/components/automation/test_event.py
|
10
|
"""The tests for the Event automation."""
import pytest
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
import homeassistant.components.automation as automation
from tests.common import mock_component
from tests.components.automation import common
from tests.common import async_mock_service
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, 'test', 'automation')
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, 'group')
async def test_if_fires_on_event(hass, calls):
"""Test the firing of events."""
context = Context()
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
}
}
})
hass.bus.async_fire('test_event', context=context)
await hass.async_block_till_done()
assert 1 == len(calls)
assert calls[0].context.parent_id == context.id
await common.async_turn_off(hass)
await hass.async_block_till_done()
hass.bus.async_fire('test_event')
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_event_extra_data(hass, calls):
"""Test the firing of events still matches with event data."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
}
}
})
hass.bus.async_fire('test_event', {'extra_key': 'extra_data'})
await hass.async_block_till_done()
assert 1 == len(calls)
await common.async_turn_off(hass)
await hass.async_block_till_done()
hass.bus.async_fire('test_event')
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_event_with_data(hass, calls):
"""Test the firing of events with data."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
'event_data': {'some_attr': 'some_value'}
},
'action': {
'service': 'test.automation',
}
}
})
hass.bus.async_fire('test_event', {'some_attr': 'some_value',
'another': 'value'})
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_event_with_empty_data_config(hass, calls):
"""Test the firing of events with empty data config.
The frontend automation editor can produce configurations with an
empty dict for event_data instead of no key.
"""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
'event_data': {}
},
'action': {
'service': 'test.automation',
}
}
})
hass.bus.async_fire('test_event', {'some_attr': 'some_value',
'another': 'value'})
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_on_event_with_nested_data(hass, calls):
"""Test the firing of events with nested data."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
'event_data': {
'parent_attr': {
'some_attr': 'some_value'
}
}
},
'action': {
'service': 'test.automation',
}
}
})
hass.bus.async_fire('test_event', {
'parent_attr': {
'some_attr': 'some_value',
'another': 'value'
}
})
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_not_fires_if_event_data_not_matches(hass, calls):
"""Test firing of event if no match."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
'event_data': {'some_attr': 'some_value'}
},
'action': {
'service': 'test.automation',
}
}
})
hass.bus.async_fire('test_event', {'some_attr': 'some_other_value'})
await hass.async_block_till_done()
assert 0 == len(calls)
|
argv0/cloudstack
|
refs/heads/master
|
tools/marvin/marvin/marvinPlugin.py
|
2
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import marvin
import logging
import nose.core
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin import deployDataCenter
from nose.plugins.base import Plugin
from functools import partial
def testCaseLogger(message, logger=None):
if logger is not None:
logger.debug(message)
class MarvinPlugin(Plugin):
"""
Custom plugin for the cloudstackTestCases to be run using nose
"""
name = "marvin"
def configure(self, options, config):
self.enabled = 1
self.enableOpt = "--with-marvin"
self.logformat = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s")
if options.debug_log:
self.logger = logging.getLogger("NoseTestExecuteEngine")
self.debug_stream = logging.FileHandler(options.debug_log)
self.debug_stream.setFormatter(self.logformat)
self.logger.addHandler(self.debug_stream)
self.logger.setLevel(logging.DEBUG)
if options.result_log:
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
ch.setFormatter(self.logformat)
self.logger.addHandler(ch)
self.result_stream = open(options.result_log, "w")
else:
self.result_stream = sys.stderr
deploy = deployDataCenter.deployDataCenters(options.config)
        if options.load:
            deploy.loadCfg()
        else:
            deploy.deploy()
self.setClient(deploy.testClient)
cfg = nose.config.Config()
cfg.logStream = self.result_stream
cfg.debugLog = self.debug_stream
self.testrunner = nose.core.TextTestRunner(stream=self.result_stream, descriptions=True, verbosity=2, config=config)
def options(self, parser, env):
"""
Register command line options
"""
parser.add_option("--marvin-config", action="store",
default=env.get('MARVIN_CONFIG', './datacenter.cfg'),
dest="config",
help="Marvin's configuration file where the datacenter information is specified [MARVIN_CONFIG]")
parser.add_option("--result-log", action="store",
default=env.get('RESULT_LOG', 'result.log'),
dest="result_log",
help="The path to the results file where test summary will be written to [RESULT_LOG]")
parser.add_option("--client-log", action="store",
default=env.get('DEBUG_LOG', 'debug.log'),
dest="debug_log",
help="The path to the testcase debug logs [DEBUG_LOG]")
parser.add_option("--load", action="store_true", default=False, dest="load",
help="Only load the deployment configuration given")
Plugin.options(self, parser, env)
def __init__(self):
Plugin.__init__(self)
def prepareTestRunner(self, runner):
return self.testrunner
def wantClass(self, cls):
if issubclass(cls, cloudstackTestCase):
return True
return None
def loadTestsFromTestCase(self, cls):
self._injectClients(cls)
def setClient(self, client):
if client:
self.testclient = client
def _injectClients(self, test):
testcaselogger = logging.getLogger("testclient.testcase.%s" % test.__name__)
self.debug_stream.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s"))
testcaselogger.addHandler(self.debug_stream)
testcaselogger.setLevel(logging.DEBUG)
setattr(test, "testClient", self.testclient)
setattr(test, "debug", partial(testCaseLogger, logger=testcaselogger))
setattr(test, "clstestclient", self.testclient)
if hasattr(test, "UserName"):
self.testclient.createNewApiClient(test.UserName, test.DomainName, test.AcctType)
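# Hedged usage note (editor's addition): with this plugin registered, the
# suite is typically driven through nose, e.g.
#
#     nosetests --with-marvin --marvin-config=./datacenter.cfg \
#               --result-log=result.log --client-log=debug.log tests/
#
# The flags correspond to the parser.add_option calls in options() above;
# --load skips deployment and only loads the given configuration.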
|
towerjoo/DjangoNotes
|
refs/heads/master
|
Django-1.5.1/django/core/management/sql.py
|
104
|
from __future__ import unicode_literals
import codecs
import os
import re
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models
from django.db.models import get_models
from django.utils._os import upath
def sql_create(app, style, connection):
"Returns a list of the CREATE TABLE SQL statements for the given app."
if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set ENGINE for the database.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't properly specified the ENGINE setting for the database.\n" +
"see: https://docs.djangoproject.com/en/dev/ref/settings/#databases")
# Get installed models, so we generate REFERENCES right.
# We trim models from the current app so that the sqlreset command does not
# generate invalid SQL (leaving models out of known_models is harmless, so
# we can be conservative).
app_models = models.get_models(app, include_auto_created=True)
final_output = []
tables = connection.introspection.table_names()
known_models = set([model for model in connection.introspection.installed_models(tables) if model not in app_models])
pending_references = {}
for model in app_models:
output, references = connection.creation.sql_create_model(model, style, known_models)
final_output.extend(output)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in known_models:
final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
# Keep track of the fact that we've created the table for this model.
known_models.add(model)
# Handle references to tables that are from other apps
# but don't exist physically.
not_installed_models = set(pending_references.keys())
if not_installed_models:
alter_sql = []
for model in not_installed_models:
alter_sql.extend(['-- ' + sql for sql in
connection.creation.sql_for_pending_references(model, style, pending_references)])
if alter_sql:
final_output.append('-- The following references should be added but depend on non-existent tables:')
final_output.extend(alter_sql)
return final_output
def sql_delete(app, style, connection):
"Returns a list of the DROP TABLE SQL statements for the given app."
# This should work even if a connection isn't available
try:
cursor = connection.cursor()
    except Exception:
cursor = None
# Figure out which tables already exist
if cursor:
table_names = connection.introspection.table_names(cursor)
else:
table_names = []
output = []
# Output DROP TABLE statements for standard application tables.
to_delete = set()
references_to_delete = {}
app_models = models.get_models(app, include_auto_created=True)
for model in app_models:
if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append((model, f))
to_delete.add(model)
for model in app_models:
if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))
# Close database connection explicitly, in case this output is being piped
# directly into a database client, to avoid locking issues.
if cursor:
cursor.close()
connection.close()
return output[::-1] # Reverse it, to deal with table dependencies.
def sql_flush(style, connection, only_django=False, reset_sequences=True):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True)
else:
tables = connection.introspection.table_names()
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs)
return statements
def sql_custom(app, style, connection):
"Returns a list of the custom table modifying SQL statements for the given app."
output = []
app_models = get_models(app)
for model in app_models:
output.extend(custom_sql_for_model(model, style, connection))
return output
def sql_indexes(app, style, connection):
"Returns a list of the CREATE INDEX SQL statements for all models in the given app."
output = []
for model in models.get_models(app):
output.extend(connection.creation.sql_indexes_for_model(model, style))
return output
def sql_all(app, style, connection):
"Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
return sql_create(app, style, connection) + sql_custom(app, style, connection) + sql_indexes(app, style, connection)
def _split_statements(content):
comment_re = re.compile(r"^((?:'[^']*'|[^'])*?)--.*$")
statements = []
statement = []
for line in content.split("\n"):
cleaned_line = comment_re.sub(r"\1", line).strip()
if not cleaned_line:
continue
statement.append(cleaned_line)
if cleaned_line.endswith(";"):
statements.append(" ".join(statement))
statement = []
return statements
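# Illustrative behavior of _split_statements (editor's addition): "--"
# comments are stripped and lines are joined until one ends with ";", so
#
#     _split_statements("INSERT INTO t -- seed row\nVALUES (1);")
#
# returns ["INSERT INTO t VALUES (1);"].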
def custom_sql_for_model(model, style, connection):
opts = model._meta
app_dir = os.path.normpath(os.path.join(os.path.dirname(upath(models.get_app(model._meta.app_label).__file__)), 'sql'))
output = []
# Post-creation SQL should come before any initial SQL data is loaded.
# However, this should not be done for models that are unmanaged or
# for fields that are part of a parent model (via model inheritance).
if opts.managed:
post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
for f in post_sql_fields:
output.extend(f.post_create_sql(style, model._meta.db_table))
# Find custom SQL, if it's available.
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (opts.object_name.lower(), backend_name)),
os.path.join(app_dir, "%s.sql" % opts.object_name.lower())]
for sql_file in sql_files:
if os.path.exists(sql_file):
with codecs.open(sql_file, 'U', encoding=settings.FILE_CHARSET) as fp:
# Some backends can't execute more than one SQL statement at a time,
# so split into separate statements.
output.extend(_split_statements(fp.read()))
return output
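# Illustrative lookup (editor's addition): for a model ``Poll`` in an app
# ``polls`` running on ENGINE ``django.db.backends.sqlite3``, the files
# consulted are ``polls/sql/poll.sqlite3.sql`` and then ``polls/sql/poll.sql``.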
def emit_post_sync_signal(created_models, verbosity, interactive, db):
# Emit the post_sync signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print("Running post-sync handlers for application %s" % app_name)
models.signals.post_syncdb.send(sender=app, app=app,
created_models=created_models, verbosity=verbosity,
interactive=interactive, db=db)
|
jdar/phantomjs-modified
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py
|
139
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides the opening handshake processor for the WebSocket
protocol (RFC 6455).
Specification:
http://tools.ietf.org/html/rfc6455
"""
# Note: request.connection.write is used in this module, even though the
# mod_python documentation says that it should be used only in connection
# handlers. Unfortunately, we have no other option. For example, request.write
# is not suitable because it doesn't allow direct raw bytes writing.
import base64
import logging
import os
import re
from mod_pywebsocket import common
from mod_pywebsocket.extensions import get_extension_processor
from mod_pywebsocket.extensions import is_compression_extension
from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
from mod_pywebsocket.handshake._base import get_mandatory_header
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import parse_token_list
from mod_pywebsocket.handshake._base import validate_mandatory_header
from mod_pywebsocket.handshake._base import validate_subprotocol
from mod_pywebsocket.handshake._base import VersionException
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648
# disallows non-zero padding, so the character right before == must be any of
# A, Q, g and w.
_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')
# Defining aliases for values used frequently.
_VERSION_HYBI08 = common.VERSION_HYBI08
_VERSION_HYBI08_STRING = str(_VERSION_HYBI08)
_VERSION_LATEST = common.VERSION_HYBI_LATEST
_VERSION_LATEST_STRING = str(_VERSION_LATEST)
_SUPPORTED_VERSIONS = [
_VERSION_LATEST,
_VERSION_HYBI08,
]
def compute_accept(key):
"""Computes value for the Sec-WebSocket-Accept header from value of the
Sec-WebSocket-Key header.
"""
accept_binary = util.sha1_hash(
key + common.WEBSOCKET_ACCEPT_UUID).digest()
accept = base64.b64encode(accept_binary)
return (accept, accept_binary)
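# Editor's note: the example handshake in RFC 6455 section 1.3 exercises this
# function: for the key "dGhlIHNhbXBsZSBub25jZQ==" the computed accept value
# is "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".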
class Handshaker(object):
"""Opening handshake processor for the WebSocket protocol (RFC 6455)."""
def __init__(self, request, dispatcher):
"""Construct an instance.
Args:
request: mod_python request.
dispatcher: Dispatcher (dispatch.Dispatcher).
Handshaker will add attributes such as ws_resource during handshake.
"""
self._logger = util.get_class_logger(self)
self._request = request
self._dispatcher = dispatcher
def _validate_connection_header(self):
connection = get_mandatory_header(
self._request, common.CONNECTION_HEADER)
try:
connection_tokens = parse_token_list(connection)
except HandshakeException, e:
raise HandshakeException(
'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))
connection_is_valid = False
for token in connection_tokens:
if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
connection_is_valid = True
break
if not connection_is_valid:
raise HandshakeException(
'%s header doesn\'t contain "%s"' %
(common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
def do_handshake(self):
self._request.ws_close_code = None
self._request.ws_close_reason = None
# Parsing.
check_request_line(self._request)
validate_mandatory_header(
self._request,
common.UPGRADE_HEADER,
common.WEBSOCKET_UPGRADE_TYPE)
self._validate_connection_header()
self._request.ws_resource = self._request.uri
unused_host = get_mandatory_header(self._request, common.HOST_HEADER)
self._request.ws_version = self._check_version()
        # This handshake must be based on the latest hybi draft. We are
        # responsible for falling back to HTTP on handshake failure, as the
        # latest hybi handshake specifies.
try:
self._get_origin()
self._set_protocol()
self._parse_extensions()
# Key validation, response generation.
key = self._get_key()
(accept, accept_binary) = compute_accept(key)
self._logger.debug(
'%s: %r (%s)',
common.SEC_WEBSOCKET_ACCEPT_HEADER,
accept,
util.hexify(accept_binary))
self._logger.debug('Protocol version is RFC 6455')
# Setup extension processors.
processors = []
if self._request.ws_requested_extensions is not None:
for extension_request in self._request.ws_requested_extensions:
processor = get_extension_processor(extension_request)
# Unknown extension requests are just ignored.
if processor is not None:
processors.append(processor)
self._request.ws_extension_processors = processors
# List of extra headers. The extra handshake handler may add header
# data as name/value pairs to this list and pywebsocket appends
# them to the WebSocket handshake.
self._request.extra_headers = []
# Extra handshake handler may modify/remove processors.
self._dispatcher.do_extra_handshake(self._request)
processors = filter(lambda processor: processor is not None,
self._request.ws_extension_processors)
# Ask each processor if there are extensions on the request which
# cannot co-exist. When processor decided other processors cannot
# co-exist with it, the processor marks them (or itself) as
# "inactive". The first extension processor has the right to
# make the final call.
for processor in reversed(processors):
if processor.is_active():
processor.check_consistency_with_other_processors(
processors)
processors = filter(lambda processor: processor.is_active(),
processors)
accepted_extensions = []
                # We need to take the mux extension into account here.
# If mux extension exists:
# - Remove processors of extensions for logical channel,
# which are processors located before the mux processor
# - Pass extension requests for logical channel to mux processor
# - Attach the mux processor to the request. It will be referred
# by dispatcher to see whether the dispatcher should use mux
# handler or not.
mux_index = -1
for i, processor in enumerate(processors):
if processor.name() == common.MUX_EXTENSION:
mux_index = i
break
if mux_index >= 0:
logical_channel_extensions = []
for processor in processors[:mux_index]:
logical_channel_extensions.append(processor.request())
processor.set_active(False)
self._request.mux_processor = processors[mux_index]
self._request.mux_processor.set_extensions(
logical_channel_extensions)
processors = filter(lambda processor: processor.is_active(),
processors)
stream_options = StreamOptions()
for index, processor in enumerate(processors):
if not processor.is_active():
continue
extension_response = processor.get_extension_response()
if extension_response is None:
# Rejected.
continue
accepted_extensions.append(extension_response)
processor.setup_stream_options(stream_options)
if not is_compression_extension(processor.name()):
continue
# Inactivate all of the following compression extensions.
for j in xrange(index + 1, len(processors)):
if is_compression_extension(processors[j].name()):
processors[j].set_active(False)
if len(accepted_extensions) > 0:
self._request.ws_extensions = accepted_extensions
self._logger.debug(
'Extensions accepted: %r',
map(common.ExtensionParameter.name, accepted_extensions))
else:
self._request.ws_extensions = None
self._request.ws_stream = self._create_stream(stream_options)
if self._request.ws_requested_protocols is not None:
if self._request.ws_protocol is None:
raise HandshakeException(
'do_extra_handshake must choose one subprotocol from '
'ws_requested_protocols and set it to ws_protocol')
validate_subprotocol(self._request.ws_protocol)
self._logger.debug(
'Subprotocol accepted: %r',
self._request.ws_protocol)
else:
if self._request.ws_protocol is not None:
raise HandshakeException(
'ws_protocol must be None when the client didn\'t '
'request any subprotocol')
self._send_handshake(accept)
except HandshakeException, e:
if not e.status:
# Fallback to 400 bad request by default.
e.status = common.HTTP_STATUS_BAD_REQUEST
raise e
def _get_origin(self):
if self._request.ws_version is _VERSION_HYBI08:
origin_header = common.SEC_WEBSOCKET_ORIGIN_HEADER
else:
origin_header = common.ORIGIN_HEADER
origin = self._request.headers_in.get(origin_header)
if origin is None:
self._logger.debug('Client request does not have origin header')
self._request.ws_origin = origin
def _check_version(self):
version = get_mandatory_header(self._request,
common.SEC_WEBSOCKET_VERSION_HEADER)
if version == _VERSION_HYBI08_STRING:
return _VERSION_HYBI08
if version == _VERSION_LATEST_STRING:
return _VERSION_LATEST
if version.find(',') >= 0:
raise HandshakeException(
'Multiple versions (%r) are not allowed for header %s' %
(version, common.SEC_WEBSOCKET_VERSION_HEADER),
status=common.HTTP_STATUS_BAD_REQUEST)
raise VersionException(
'Unsupported version %r for header %s' %
(version, common.SEC_WEBSOCKET_VERSION_HEADER),
supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))
def _set_protocol(self):
self._request.ws_protocol = None
protocol_header = self._request.headers_in.get(
common.SEC_WEBSOCKET_PROTOCOL_HEADER)
if protocol_header is None:
self._request.ws_requested_protocols = None
return
self._request.ws_requested_protocols = parse_token_list(
protocol_header)
self._logger.debug('Subprotocols requested: %r',
self._request.ws_requested_protocols)
def _parse_extensions(self):
extensions_header = self._request.headers_in.get(
common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
if not extensions_header:
self._request.ws_requested_extensions = None
return
        if self._request.ws_version is common.VERSION_HYBI08:
            allow_quoted_string = False
        else:
            allow_quoted_string = True
try:
self._request.ws_requested_extensions = common.parse_extensions(
extensions_header, allow_quoted_string=allow_quoted_string)
except common.ExtensionParsingException, e:
raise HandshakeException(
'Failed to parse Sec-WebSocket-Extensions header: %r' % e)
self._logger.debug(
'Extensions requested: %r',
map(common.ExtensionParameter.name,
self._request.ws_requested_extensions))
def _validate_key(self, key):
if key.find(',') >= 0:
raise HandshakeException('Request has multiple %s header lines or '
'contains illegal character \',\': %r' %
(common.SEC_WEBSOCKET_KEY_HEADER, key))
# Validate
key_is_valid = False
try:
# Validate key by quick regex match before parsing by base64
# module. Because base64 module skips invalid characters, we have
# to do this in advance to make this server strictly reject illegal
# keys.
if _SEC_WEBSOCKET_KEY_REGEX.match(key):
decoded_key = base64.b64decode(key)
if len(decoded_key) == 16:
key_is_valid = True
except TypeError, e:
pass
if not key_is_valid:
raise HandshakeException(
'Illegal value for header %s: %r' %
(common.SEC_WEBSOCKET_KEY_HEADER, key))
return decoded_key
def _get_key(self):
key = get_mandatory_header(
self._request, common.SEC_WEBSOCKET_KEY_HEADER)
decoded_key = self._validate_key(key)
self._logger.debug(
'%s: %r (%s)',
common.SEC_WEBSOCKET_KEY_HEADER,
key,
util.hexify(decoded_key))
return key
def _create_stream(self, stream_options):
return Stream(self._request, stream_options)
def _create_handshake_response(self, accept):
response = []
response.append('HTTP/1.1 101 Switching Protocols\r\n')
# WebSocket headers
response.append(format_header(
common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
response.append(format_header(
common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
response.append(format_header(
common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
if self._request.ws_protocol is not None:
response.append(format_header(
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
self._request.ws_protocol))
if (self._request.ws_extensions is not None and
len(self._request.ws_extensions) != 0):
response.append(format_header(
common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(self._request.ws_extensions)))
# Headers not specific for WebSocket
for name, value in self._request.extra_headers:
response.append(format_header(name, value))
response.append('\r\n')
return ''.join(response)
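    # Editor's note: for a successful handshake, the response assembled above
    # has the following shape (subprotocol/extension headers appear only when
    # negotiated):
    #
    #     HTTP/1.1 101 Switching Protocols
    #     Upgrade: websocket
    #     Connection: Upgrade
    #     Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=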
def _send_handshake(self, accept):
raw_response = self._create_handshake_response(accept)
self._request.connection.write(raw_response)
self._logger.debug('Sent server\'s opening handshake: %r',
raw_response)
# vi:sts=4 sw=4 et
|
Microvellum/Fluid-Designer
|
refs/heads/master
|
win64-vc/2.78/Python/lib/site-packages/pip/_vendor/pkg_resources/__init__.py
|
320
|
# coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
import itertools
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
if (3, 0) < sys.version_info < (3, 3):
msg = (
"Support for Python 3.0-3.2 has been dropped. Future versions "
"will fail here."
)
warnings.warn(msg)
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*' + part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
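# Illustrative behavior (editor's addition): PEP 440 compliant strings yield a
# SetuptoolsVersion, anything else falls back to the legacy parser:
#
#     parse_version("1.0.post1")      # -> SetuptoolsVersion
#     parse_version("not-a-version")  # -> SetuptoolsLegacyVersion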
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
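# Hedged illustration (editor's addition): pairing a hypothetical loader class
# with a provider factory; ``MyLoader`` is not a real class in this module.
#
#     register_loader_type(MyLoader, lambda module: DefaultProvider(module))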
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
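# Illustrative calls (editor's addition):
#
#     compatible_platforms(None, "macosx-10.4-ppc")               # True
#     compatible_platforms("macosx-10.3-ppc", "macosx-10.4-ppc")  # True
#     compatible_platforms("macosx-10.5-ppc", "macosx-10.4-ppc")  # False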
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
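    # Illustrative usage against the module-global working set
    # ('console_scripts' is the standard script group):
    #   for ep in working_set.iter_entry_points('console_scripts'):
    #       command = ep.load()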
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
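    # Sketch of typical usage ('foo' is a hypothetical project; resolve()
    # returns the needed distributions without activating them):
    #   reqs = parse_requirements('foo>=1.0')
    #   for dist in working_set.resolve(reqs):
    #       working_set.add(dist)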
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
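    # Unlike resolve(), require() also activates what it finds, e.g.
    # (hypothetical project name):
    #   working_set.require('foo>=1.0')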
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
        If `existing=True` (default), the callback is also invoked for
        each distribution already in the working set.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (None,)
)
return not req.marker or any(extra_evals)
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version == self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
        In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
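    # Illustrative usage (hypothetical package and resource names; note that
    # resource_string() returns raw bytes for most providers):
    #   mgr = ResourceManager()
    #   if mgr.resource_exists('mypkg', 'data/config.txt'):
    #       data = mgr.resource_string('mypkg', 'data/config.txt')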
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
        except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
    Any runs of characters other than letters, digits, and '.'
    are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
    Any runs of characters other than letters, digits, '.', and '-' are
    replaced with a single '_', and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
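# Worked examples for the four helpers above (safe_version output assumes
# standard 'packaging' version normalization):
#   safe_name('hello_world')   -> 'hello-world'
#   safe_version('2.1-rc1')    -> '2.1rc1'
#   safe_extra('Foo Bar')      -> 'foo_bar'
#   to_filename('my-dist')     -> 'my_dist'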
def invalid_marker(text):
"""
    Validate text as a PEP 508 environment marker; return an exception
    instance if it is invalid, or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
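# Illustrative behavior (results depend on the running interpreter):
#   evaluate_marker("python_version >= '2.6'")  # typically True
#   invalid_marker("garbage")  # returns a SyntaxError instance, not False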
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
def get_metadata(self, name):
if not self.egg_info:
return ""
value = self._get(self._fn(self.egg_info, name))
return value.decode('utf-8') if six.PY3 else value
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_unpacked_egg(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_cls = getattr(importlib_machinery, 'SourceFileLoader',
type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.6 and 3.2 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if _is_unpacked_egg(subitem):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(os.listdir(path_item))
for entry in path_item_entries:
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
                        if not os.listdir(fullpath):
# Empty egg directory, skip.
continue
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and _is_unpacked_egg(entry):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
orig_path.sort(key=position_in_sys_path)
module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
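# Historically placed in each namespace package's __init__.py:
#   __import__('pkg_resources').declare_namespace(__name__)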
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
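# e.g. list(yield_lines("a\n# comment\n\nb")) -> ['a', 'b']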
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
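# EGG_NAME is applied to an egg basename with its extension already stripped
# (see Distribution.from_location). For example (hypothetical basename):
#   EGG_NAME('FooPkg-1.2-py2.7-linux_x86_64') captures
#   name='FooPkg', ver='1.2', pyver='2.7', plat='linux_x86_64'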
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
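    # Illustrative parse ('mypkg.cli' and 'cli' are hypothetical names):
    #   ep = EntryPoint.parse("main = mypkg.cli:run [cli]")
    #   (ep.name, ep.module_name, ep.attrs, ep.extras)
    #   -> ('main', 'mypkg.cli', ('run',), ('cli',))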
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
is_version_line = lambda line: line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it is unlikely to come from a
        # real project; more likely setuptools produced it while parsing
        # a filename. So we gate this warning on whether the version is
        # non-empty.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = _version_from_file(self._get_metadata(self.PKG_INFO))
if version is None:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
return version
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs = []
elif not evaluate_marker(marker):
reqs = []
extra = safe_extra(extra) or None
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
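    # e.g. a distribution for project 'foo' at version '1.2' yields
    # Requirement.parse('foo==1.2'); legacy (non-PEP 440) versions use the
    # '===' arbitrary-equality operator instead.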
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
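# Illustrative sketch (not part of the original class), assuming a
# hypothetical egg at '/site/foo.egg' whose parent '/site' is already
# on the path: with the default replace=False, insert_on() places the
# egg just ahead of its parent directory, e.g.
#   path = ['/site', '/other']
#   dist.insert_on(path)   # path becomes ['/site/foo.egg', '/site', '/other']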
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
Packages installed by distutils (e.g. numpy or scipy) use an old
safe_version, so their version numbers can get mangled when
converted to filenames (e.g. 1.11.0.dev0+2329eae becomes
1.11.0.dev0_2329eae). These distributions will not be parsed
properly downstream by Distribution and safe_version, so take an
extra step and try to get the version number from the metadata
file itself instead of the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
line += next(lines)
yield Requirement(line)
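def _demo_parse_requirements():
    # Hedged sketch, not part of the original module; the project names
    # are hypothetical. Shows that inline " #" comments are dropped and
    # that each specification yields a Requirement object.
    src = "pkg-a>=1.0  # comment is dropped\npkg-b[extra]==2.0"
    return [str(req) for req in parse_requirements(src)]
    # -> ['pkg-a>=1.0', 'pkg-b[extra]==2.0']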
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
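def _demo_requirement():
    # Hedged sketch, not part of the original module; the project name is
    # hypothetical. Shows the parsed attributes and the containment check,
    # which tests whether a version satisfies the specifier.
    req = Requirement.parse("example-pkg>=1.2,<2.0")
    assert req.key == "example-pkg"
    assert ("<", "2.0") in req.specs
    return "1.5" in req  # True: 1.5 satisfies >=1.2,<2.0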
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object):
pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
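def _demo_split_sections():
    # Hedged sketch, not part of the original module; the section and
    # entry names are hypothetical. Lines before the first "[section]"
    # header come back under a section of None.
    sample = "top-level\n[extras]\nfirst\nsecond\n"
    return list(split_sections(sample))
    # -> [(None, ['top-level']), ('extras', ['first', 'second'])]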
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
dist = None # ensure dist is defined for del dist below
for dist in working_set:
dist.activate(replace=False)
del dist
add_activation_listener(lambda dist: dist.activate(replace=True), existing=False)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
|
fafaman/django
|
refs/heads/master
|
tests/serializers/tests.py
|
61
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import json
import re
import unittest
from datetime import datetime
from xml.dom import minidom
from django.core import management, serializers
from django.core.serializers.base import ProgressBar
from django.db import connection, transaction
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipUnlessDBFeature,
)
from django.test.utils import Approximate
from django.utils import six
from django.utils.six import StringIO
from .models import (
Actor, Article, Author, AuthorProfile, Category, Movie, Player, Score,
Team,
)
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
@override_settings(
SERIALIZATION_MODULES={
"json2": "django.core.serializers.json",
}
)
class SerializerRegistrationTests(SimpleTestCase):
def setUp(self):
self.old_serializers = serializers._serializers
serializers._serializers = {}
def tearDown(self):
serializers._serializers = self.old_serializers
def test_register(self):
"Registering a new serializer populates the full registry. Refs #14823"
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertIn('json3', public_formats)
self.assertIn('json2', public_formats)
self.assertIn('xml', public_formats)
def test_unregister(self):
"Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"
serializers.unregister_serializer('xml')
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertNotIn('xml', public_formats)
self.assertIn('json3', public_formats)
def test_builtin_serializers(self):
"Requesting a list of serializer formats popuates the registry"
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
self.assertIn('xml', all_formats)
self.assertIn('xml', public_formats)
self.assertIn('json2', all_formats)
self.assertIn('json2', public_formats)
self.assertIn('python', all_formats)
self.assertNotIn('python', public_formats)
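def test_get_serializer(self):
    # Hedged illustrative addition, not part of the original suite:
    # looking up a registered format returns the serializer class, here
    # the JSON serializer registered under the 'json2' alias above.
    serializer_cls = serializers.get_serializer("json2")
    self.assertTrue(callable(serializer_cls))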
class SerializersTestBase(object):
@staticmethod
def _comparison_value(value):
return value
def setUp(self):
sports = Category.objects.create(name="Sports")
music = Category.objects.create(name="Music")
op_ed = Category.objects.create(name="Op-Ed")
self.joe = Author.objects.create(name="Joe")
self.jane = Author.objects.create(name="Jane")
self.a1 = Article(
author=self.jane,
headline="Poker has no place on ESPN",
pub_date=datetime(2006, 6, 16, 11, 00)
)
self.a1.save()
self.a1.categories = [sports, op_ed]
self.a2 = Article(
author=self.joe,
headline="Time to reform copyright",
pub_date=datetime(2006, 6, 16, 13, 00, 11, 345)
)
self.a2.save()
self.a2.categories = [music, op_ed]
def test_serialize(self):
"""Tests that basic serialization works."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
self.assertTrue(self._validate_output(serial_str))
def test_serializer_roundtrip(self):
"""Tests that serialized content can be deserialized."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(len(models), 2)
def test_altering_serialized_output(self):
"""
Tests the ability to create new objects by
modifying serialized content.
"""
old_headline = "Poker has no place on ESPN"
new_headline = "Poker has no place on television"
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
serial_str = serial_str.replace(old_headline, new_headline)
models = list(serializers.deserialize(self.serializer_name, serial_str))
# Prior to saving, old headline is in place
self.assertTrue(Article.objects.filter(headline=old_headline))
self.assertFalse(Article.objects.filter(headline=new_headline))
for model in models:
model.save()
# After saving, new headline is in place
self.assertTrue(Article.objects.filter(headline=new_headline))
self.assertFalse(Article.objects.filter(headline=old_headline))
def test_one_to_one_as_pk(self):
"""
Tests that if you use your own primary key field
(such as a OneToOneField), it doesn't appear in the
serialized field list - it replaces the pk identifier.
"""
profile = AuthorProfile(author=self.joe,
date_of_birth=datetime(1970, 1, 1))
profile.save()
serial_str = serializers.serialize(self.serializer_name,
AuthorProfile.objects.all())
self.assertFalse(self._get_field_values(serial_str, 'author'))
for obj in serializers.deserialize(self.serializer_name, serial_str):
self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
def test_serialize_field_subset(self):
"""Tests that output can be restricted to a subset of fields"""
valid_fields = ('headline', 'pub_date')
invalid_fields = ("author", "categories")
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all(),
fields=valid_fields)
for field_name in invalid_fields:
self.assertFalse(self._get_field_values(serial_str, field_name))
for field_name in valid_fields:
self.assertTrue(self._get_field_values(serial_str, field_name))
def test_serialize_unicode(self):
"""Tests that unicode makes the roundtrip intact"""
actor_name = "Za\u017c\u00f3\u0142\u0107"
movie_title = 'G\u0119\u015bl\u0105 ja\u017a\u0144'
ac = Actor(name=actor_name)
mv = Movie(title=movie_title, actor=ac)
ac.save()
mv.save()
serial_str = serializers.serialize(self.serializer_name, [mv])
self.assertEqual(self._get_field_values(serial_str, "title")[0], movie_title)
self.assertEqual(self._get_field_values(serial_str, "actor")[0], actor_name)
obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
mv_obj = obj_list[0].object
self.assertEqual(mv_obj.title, movie_title)
def test_serialize_progressbar(self):
fake_stdout = StringIO()
serializers.serialize(
self.serializer_name, Article.objects.all(),
progress_output=fake_stdout, object_count=Article.objects.count()
)
self.assertTrue(
fake_stdout.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n')
)
def test_serialize_superfluous_queries(self):
"""Ensure no superfluous queries are made when serializing ForeignKeys
#17602
"""
ac = Actor(name='Actor name')
ac.save()
mv = Movie(title='Movie title', actor_id=ac.pk)
mv.save()
with self.assertNumQueries(0):
serializers.serialize(self.serializer_name, [mv])
def test_serialize_with_null_pk(self):
"""
Tests that serialized data with no primary key results
in a model instance with no id
"""
category = Category(name="Reference")
serial_str = serializers.serialize(self.serializer_name, [category])
pk_value = self._get_pk_values(serial_str)[0]
self.assertFalse(pk_value)
cat_obj = list(serializers.deserialize(self.serializer_name,
serial_str))[0].object
self.assertEqual(cat_obj.id, None)
def test_float_serialization(self):
"""Tests that float values serialize and deserialize intact"""
sc = Score(score=3.4)
sc.save()
serial_str = serializers.serialize(self.serializer_name, [sc])
deserial_objs = list(serializers.deserialize(self.serializer_name,
serial_str))
self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
def test_deferred_field_serialization(self):
author = Author.objects.create(name='Victor Hugo')
author = Author.objects.defer('name').get(pk=author.pk)
serial_str = serializers.serialize(self.serializer_name, [author])
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
# Check the class instead of using isinstance() because model instances
# with deferred fields (e.g. Author_Deferred_name) will pass isinstance.
self.assertEqual(deserial_objs[0].object.__class__, Author)
def test_custom_field_serialization(self):
"""Tests that custom fields serialize and deserialize intact"""
team_str = "Spartak Moskva"
player = Player()
player.name = "Soslan Djanaev"
player.rank = 1
player.team = Team(team_str)
player.save()
serial_str = serializers.serialize(self.serializer_name,
Player.objects.all())
team = self._get_field_values(serial_str, "team")
self.assertTrue(team)
self.assertEqual(team[0], team_str)
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.team.to_string(),
player.team.to_string())
def test_pre_1000ad_date(self):
"""Tests that year values before 1000AD are properly formatted"""
# Regression for #12524 -- dates before 1000AD get prefixed
# 0's on the year
a = Article.objects.create(
author=self.jane,
headline="Nobody remembers the early years",
pub_date=datetime(1, 2, 3, 4, 5, 6))
serial_str = serializers.serialize(self.serializer_name, [a])
date_values = self._get_field_values(serial_str, "pub_date")
self.assertEqual(date_values[0].replace('T', ' '), "0001-02-03 04:05:06")
def test_pkless_serialized_strings(self):
"""
Tests that serialized strings without PKs
can be turned into models
"""
deserial_objs = list(serializers.deserialize(self.serializer_name,
self.pkless_str))
for obj in deserial_objs:
self.assertFalse(obj.object.id)
obj.save()
self.assertEqual(Category.objects.all().count(), 5)
def test_deterministic_mapping_ordering(self):
"""Mapping such as fields should be deterministically ordered. (#24558)"""
output = serializers.serialize(self.serializer_name, [self.a1], indent=2)
categories = self.a1.categories.values_list('pk', flat=True)
self.assertEqual(output, self.mapping_ordering_str % {
'article_pk': self.a1.pk,
'author_pk': self.a1.author_id,
'first_category_pk': categories[0],
'second_category_pk': categories[1],
})
def test_deserialize_force_insert(self):
"""Tests that deserialized content can be saved with force_insert as a parameter."""
serial_str = serializers.serialize(self.serializer_name, [self.a1])
deserial_obj = list(serializers.deserialize(self.serializer_name, serial_str))[0]
with mock.patch('django.db.models.Model') as mock_model:
deserial_obj.save(force_insert=False)
mock_model.save_base.assert_called_with(deserial_obj.object, raw=True, using=None, force_insert=False)
class SerializersTransactionTestBase(object):
available_apps = ['serializers']
@skipUnlessDBFeature('supports_forward_references')
def test_forward_refs(self):
"""
Tests that object ids can be referenced before they are
defined in the serialization data.
"""
# The deserialization process needs to run in a transaction in order
# to test forward reference handling.
with transaction.atomic():
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
with connection.constraint_checks_disabled():
for obj in objs:
obj.save()
for model_cls in (Category, Author, Article):
self.assertEqual(model_cls.objects.all().count(), 1)
art_obj = Article.objects.all()[0]
self.assertEqual(art_obj.categories.all().count(), 1)
self.assertEqual(art_obj.author.name, "Agnes")
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
<object model="serializers.category">
<field type="CharField" name="name">Non-fiction</field>
</object>
</django-objects>"""
mapping_ordering_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.article" pk="%(article_pk)s">
<field name="author" rel="ManyToOneRel" to="serializers.author">%(author_pk)s</field>
<field name="headline" type="CharField">Poker has no place on ESPN</field>
<field name="pub_date" type="DateTimeField">2006-06-16T11:00:00</field>
<field name="categories" rel="ManyToManyRel" to="serializers.category"><object pk="%(first_category_pk)s"></object><object pk="%(second_category_pk)s"></object></field>
<field name="meta_data" rel="ManyToManyRel" to="serializers.categorymetadata"></field>
</object>
</django-objects>""" # NOQA
@staticmethod
def _comparison_value(value):
# The XML serializer handles everything as strings, so comparisons
# need to be performed on the stringified value
return six.text_type(value)
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
def test_control_char_failure(self):
"""
Serializing control characters with XML should fail as those characters
are not supported in the XML 1.0 standard (except HT, LF, CR).
"""
self.a1.headline = "This contains \u0001 control \u0011 chars"
msg = "Article.headline (pk:%s) contains unserializable characters" % self.a1.pk
with self.assertRaisesMessage(ValueError, msg):
serializers.serialize(self.serializer_name, [self.a1])
self.a1.headline = "HT \u0009, LF \u000A, and CR \u000D are allowed"
self.assertIn(
"HT \t, LF \n, and CR \r are allowed",
serializers.serialize(self.serializer_name, [self.a1])
)
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
<field to="serializers.categorymetadata" name="meta_data" rel="ManyToManyRel"></field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>"""
class JsonSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "json"
pkless_str = """[
{
"pk": null,
"model": "serializers.category",
"fields": {"name": "Reference"}
}, {
"model": "serializers.category",
"fields": {"name": "Non-fiction"}
}]"""
mapping_ordering_str = """[
{
"model": "serializers.article",
"pk": %(article_pk)s,
"fields": {
"author": %(author_pk)s,
"headline": "Poker has no place on ESPN",
"pub_date": "2006-06-16T11:00:00",
"categories": [
%(first_category_pk)s,
%(second_category_pk)s
],
"meta_data": []
}
}
]
"""
@staticmethod
def _validate_output(serial_str):
try:
json.loads(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
if field_name in obj_dict["fields"]:
ret_list.append(obj_dict["fields"][field_name])
return ret_list
def test_indentation_whitespace(self):
Score.objects.create(score=5.0)
Score.objects.create(score=6.0)
qset = Score.objects.all()
s = serializers.json.Serializer()
json_data = s.serialize(qset, indent=2)
for line in json_data.splitlines():
if re.search(r'.+,\s*$', line):
self.assertEqual(line, line.rstrip())
def test_helpful_error_message_invalid_pk(self):
"""
If there is an invalid primary key, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "badpk",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": 1,
"team": "Team"
}
}]"""
with self.assertRaisesMessage(serializers.base.DeserializationError, "(serializers.player:pk=badpk)"):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_invalid_field(self):
"""
If there is an invalid field value, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "1",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": "invalidint",
"team": "Team"
}
}]"""
expected = "(serializers.player:pk=1) field_value was 'invalidint'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_foreign_keys(self):
"""
Invalid foreign keys with a natural key should throw a helpful error
message, such as what the failing key is.
"""
test_string = """[{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Unknown foreign key",
"meta_data": [
"doesnotexist",
"metadata"
]
}
}]"""
key = ["doesnotexist", "metadata"]
expected = "(serializers.category:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_non_natural(self):
"""
Invalid many-to-many keys should throw a helpful error message.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"categories": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_natural1(self):
"""
Invalid many-to-many keys should throw a helpful error message.
This tests the code path where one of a list of natural keys is invalid.
"""
test_string = """[{
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [
["author", "meta1"],
["doesnotexist", "meta1"],
["author", "meta1"]
]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
key = ["doesnotexist", "meta1"]
expected = "(serializers.article:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
for obj in serializers.deserialize('json', test_string):
obj.save()
def test_helpful_error_message_for_many2many_natural2(self):
"""
Invalid many-to-many keys should throw a helpful error message. This
tests the code path where a natural many-to-many key has only a single
value.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
for obj in serializers.deserialize('json', test_string, ignore=False):
obj.save()
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "json"
fwd_ref_str = """[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16T15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
YAML_IMPORT_ERROR_MESSAGE = r'No module named yaml'
class YamlImportModuleMock(object):
"""Provides a wrapped import_module function to simulate yaml ImportError
In order to run tests that verify the behavior of the YAML serializer
when run on a system that has yaml installed (like the django CI server),
mock import_module, so that it raises an ImportError when the yaml
serializer is being imported. The importlib.import_module() call is
being made in the serializers.register_serializer().
Refs: #12756
"""
def __init__(self):
self._import_module = importlib.import_module
def import_module(self, module_path):
if module_path == serializers.BUILTIN_SERIALIZERS['yaml']:
raise ImportError(YAML_IMPORT_ERROR_MESSAGE)
return self._import_module(module_path)
class NoYamlSerializerTestCase(SimpleTestCase):
"""Not having pyyaml installed provides a misleading error
Refs: #12756
"""
@classmethod
def setUpClass(cls):
"""Removes imported yaml and stubs importlib.import_module"""
super(NoYamlSerializerTestCase, cls).setUpClass()
cls._import_module_mock = YamlImportModuleMock()
importlib.import_module = cls._import_module_mock.import_module
# clear out cached serializers to emulate yaml missing
serializers._serializers = {}
@classmethod
def tearDownClass(cls):
"""Puts yaml back if necessary"""
super(NoYamlSerializerTestCase, cls).tearDownClass()
importlib.import_module = cls._import_module_mock._import_module
# clear out cached serializers to clean out BadSerializer instances
serializers._serializers = {}
def test_serializer_pyyaml_error_message(self):
"""Using yaml serializer without pyyaml raises ImportError"""
jane = Author(name="Jane")
self.assertRaises(ImportError, serializers.serialize, "yaml", [jane])
def test_deserializer_pyyaml_error_message(self):
"""Using yaml deserializer without pyyaml raises ImportError"""
self.assertRaises(ImportError, serializers.deserialize, "yaml", "")
def test_dumpdata_pyyaml_error_message(self):
"""Calling dumpdata produces an error when yaml package missing"""
with six.assertRaisesRegex(self, management.CommandError, YAML_IMPORT_ERROR_MESSAGE):
management.call_command('dumpdata', format='yaml')
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
pkless_str = """- fields:
name: Reference
pk: null
model: serializers.category
- fields:
name: Non-fiction
model: serializers.category"""
mapping_ordering_str = """- model: serializers.article
pk: %(article_pk)s
fields:
author: %(author_pk)s
headline: Poker has no place on ESPN
pub_date: 2006-06-16 11:00:00
categories: [%(first_category_pk)s, %(second_category_pk)s]
meta_data: []
"""
@staticmethod
def _validate_output(serial_str):
try:
yaml.safe_load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.safe_load will return non-string objects for some
# of the fields we are interested in, this ensures that
# everything comes back as a string
if isinstance(field_value, six.string_types):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
|
JazzeYoung/VeryDeepAutoEncoder
|
refs/heads/master
|
pylearn2/pylearn2/datasets/hdf5_deprecated.py
|
30
|
"""
Objects for datasets serialized in HDF5 format (.h5).
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
try:
import h5py
except ImportError:
h5py = None
import numpy as np
from theano.compat.six.moves import xrange
import warnings
from pylearn2.datasets.dense_design_matrix import (DenseDesignMatrix,
DefaultViewConverter)
from pylearn2.space import CompositeSpace, VectorSpace, IndexSpace
from pylearn2.utils.iteration import FiniteDatasetIterator, safe_izip
from pylearn2.utils import contains_nan
class HDF5DatasetDeprecated(DenseDesignMatrix):
"""
Dense dataset loaded from an HDF5 file.
Parameters
----------
filename : str
HDF5 file name.
X : str, optional
Key into HDF5 file for dataset design matrix.
topo_view: str, optional
Key into HDF5 file for topological view of dataset.
y : str, optional
Key into HDF5 file for dataset targets.
load_all : bool, optional (default False)
If true, datasets are loaded into memory instead of being left
on disk.
cache_size : int, optional
    Size in bytes for the chunk cache of the HDF5 library. Useful
    when the HDF5 file has large chunks and when using a sequential
    iterator. The chunk cache makes it possible to hit the disk only
    once per chunk and then copy batches to the GPU from memory,
    which can result in a significant speed up. Sensible default
    values depend on the size of your data and the batch size you
    wish to use. A rule of thumb is to make a chunk contain 100 -
    1000 batches and make sure they encompass complete samples.
kwargs : dict, optional
Keyword arguments passed to `DenseDesignMatrix`.
"""
def __init__(self, filename, X=None, topo_view=None, y=None,
load_all=False, cache_size=None, **kwargs):
self.load_all = load_all
if h5py is None:
raise RuntimeError("Could not import h5py.")
if cache_size:
propfaid = h5py.h5p.create(h5py.h5p.FILE_ACCESS)
settings = list(propfaid.get_cache())
settings[2] = cache_size
propfaid.set_cache(*settings)
fid = h5py.h5f.open(filename, fapl=propfaid)
self._file = h5py.File(fid)
else:
self._file = h5py.File(filename)
if X is not None:
X = self.get_dataset(X, load_all)
if topo_view is not None:
topo_view = self.get_dataset(topo_view, load_all)
if y is not None:
y = self.get_dataset(y, load_all)
super(HDF5DatasetDeprecated, self).__init__(X=X, topo_view=topo_view,
y=y, **kwargs)
def _check_labels(self):
"""
Sanity checks for X_labels and y_labels.
Since the np.all test used for these labels does not work with HDF5
datasets, we issue a warning that those values are not checked.
"""
if self.X_labels is not None:
assert self.X is not None
assert self.view_converter is None
assert self.X.ndim <= 2
if self.load_all:
assert np.all(self.X < self.X_labels)
else:
warnings.warn("HDF5Dataset cannot perform test np.all(X < " +
"X_labels). Use X_labels at your own risk.")
if self.y_labels is not None:
assert self.y is not None
assert self.y.ndim <= 2
if self.load_all:
assert np.all(self.y < self.y_labels)
else:
warnings.warn("HDF5Dataset cannot perform test np.all(y < " +
"y_labels). Use y_labels at your own risk.")
def get_dataset(self, dataset, load_all=False):
"""
Get a handle for an HDF5 dataset, or load the entire dataset into
memory.
Parameters
----------
dataset : str
Name or path of HDF5 dataset.
load_all : bool, optional (default False)
If true, load dataset into memory.
"""
if load_all:
data = self._file[dataset][:]
else:
data = self._file[dataset]
data.ndim = len(data.shape) # hdf5 handle has no ndim
return data
def iterator(self, *args, **kwargs):
"""
Get an iterator for this dataset.
The FiniteDatasetIterator uses indexing that is not supported by
HDF5 datasets, so we change the class to HDF5DatasetIterator to
override the iterator.next method used in dataset iteration.
Parameters
----------
WRITEME
"""
iterator = super(HDF5DatasetDeprecated, self).iterator(*args, **kwargs)
iterator.__class__ = HDF5DatasetIterator
return iterator
def set_topological_view(self, V, axes=('b', 0, 1, 'c')):
"""
Set up dataset topological view, without building an in-memory
design matrix.
This is mostly copied from DenseDesignMatrix, except:
* HDF5ViewConverter is used instead of DefaultViewConverter
* Data specs are derived from topo_view, not X
* NaN checks have been moved to HDF5DatasetIterator.next
Note that y may be loaded into memory for reshaping if y.ndim != 2.
Parameters
----------
V : ndarray
Topological view.
axes : tuple, optional (default ('b', 0, 1, 'c'))
Order of axes in topological view.
"""
shape = [V.shape[axes.index('b')],
V.shape[axes.index(0)],
V.shape[axes.index(1)],
V.shape[axes.index('c')]]
self.view_converter = HDF5ViewConverter(shape[1:], axes=axes)
self.X = self.view_converter.topo_view_to_design_mat(V)
# self.X_topo_space stores a "default" topological space that
# will be used only when self.iterator is called without a
# data_specs, and with "topo=True", which is deprecated.
self.X_topo_space = self.view_converter.topo_space
# Update data specs
X_space = VectorSpace(dim=V.shape[axes.index('b')])
X_source = 'features'
if self.y is None:
space = X_space
source = X_source
else:
if self.y.ndim == 1:
dim = 1
else:
dim = self.y.shape[-1]
# check if y_labels has been specified
if getattr(self, 'y_labels', None) is not None:
y_space = IndexSpace(dim=dim, max_labels=self.y_labels)
elif getattr(self, 'max_labels', None) is not None:
y_space = IndexSpace(dim=dim, max_labels=self.max_labels)
else:
y_space = VectorSpace(dim=dim)
y_source = 'targets'
space = CompositeSpace((X_space, y_space))
source = (X_source, y_source)
self.data_specs = (space, source)
self.X_space = X_space
self._iter_data_specs = (X_space, X_source)
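def _demo_hdf5_dataset():
    """Hedged sketch, not part of the original module. Shows the intended
    construction, assuming a hypothetical file 'data.h5' that contains
    HDF5 datasets named 'X' (design matrix) and 'y' (targets), with a
    16 MB chunk cache as described in the class docstring."""
    return HDF5DatasetDeprecated('data.h5', X='X', y='y',
                                 cache_size=16 * 1024 ** 2)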
class HDF5DatasetIterator(FiniteDatasetIterator):
"""
Dataset iterator for HDF5 datasets.
FiniteDatasetIterator expects a design matrix to be available, but this
will not always be the case when using HDF5 datasets with topological
views.
Parameters
----------
dataset : Dataset
Dataset over which to iterate.
subset_iterator : object
Iterator that returns slices of the dataset.
data_specs : tuple, optional
A (space, source) tuple.
return_tuple : bool, optional (default False)
Whether to return a tuple even if only one source is used.
convert : list, optional
A list of callables (in the same order as the sources in
data_specs) that will be applied to each slice of the dataset.
"""
def next(self):
"""
Get the next subset of the dataset during dataset iteration.
Converts index selections for batches to boolean selections that
are supported by HDF5 datasets.
"""
next_index = self._subset_iterator.next()
# convert to boolean selection
sel = np.zeros(self.num_examples, dtype=bool)
sel[next_index] = True
next_index = sel
rval = []
for data, fn in safe_izip(self._raw_data, self._convert):
try:
this_data = data[next_index]
except TypeError:
# FB: Why is this try..except here? I think it is useless.
# Do not hide the original error if we can't fall back.
# FV: This is triggered if the shape of next_index is
# incompatible with the shape of the dataset. See
# test_hdf5_topo_view() for an example, where
# next_index.shape = (10,) and data is 'data': <HDF5
# dataset "y": shape (10, 3), type "<f8">.
# I think it would be better to explicitly check whether
# next_index.shape is incompatible with data.shape, for
# instance by checking if next_index.ndim == data.ndim.
if data.ndim > 1:
this_data = data[next_index, :]
else:
raise
# Check if the dataset data is a vector and transform it into a
# one-column matrix. This is needed to automatically convert the
# shape of the data later (in the format_as method of the
# Space.)
if fn:
this_data = fn(this_data)
assert not contains_nan(this_data)
rval.append(this_data)
rval = tuple(rval)
if not self._return_tuple and len(rval) == 1:
rval, = rval
return rval
class HDF5ViewConverter(DefaultViewConverter):
"""
View converter that doesn't have to transpose the data.
In order to keep data on disk, does not generate a full design matrix.
Instead, an instance of HDF5TopoViewConverter is returned, which
transforms data from the topological view into the design view for each
batch.
Parameters
----------
shape : tuple
Shape of this view.
axes : tuple, optional (default ('b', 0, 1, 'c'))
Order of axes in topological view.
"""
def topo_view_to_design_mat(self, V):
"""
Generate a design matrix from the topological view.
This override of DefaultViewConverter.topo_view_to_design_mat does
not attempt to transpose the topological view, since transposition
is not supported by HDF5 datasets.
Parameters
----------
WRITEME
"""
v_shape = (V.shape[self.axes.index('b')],
V.shape[self.axes.index(0)],
V.shape[self.axes.index(1)],
V.shape[self.axes.index('c')])
if np.any(np.asarray(self.shape) != np.asarray(v_shape[1:])):
raise ValueError('View converter for views of shape batch size '
'followed by ' + str(self.shape) +
' given tensor of shape ' + str(v_shape))
rval = HDF5TopoViewConverter(V, self.axes)
return rval
class HDF5TopoViewConverter(object):
"""
Class for transforming batches from the topological view to the design
matrix view.
Parameters
----------
topo_view : HDF5 dataset
On-disk topological view.
axes : tuple, optional (default ('b', 0, 1, 'c'))
Order of axes in topological view.
"""
def __init__(self, topo_view, axes=('b', 0, 1, 'c')):
self.topo_view = topo_view
self.axes = axes
self.topo_view_shape = (topo_view.shape[axes.index('b')],
topo_view.shape[axes.index(0)],
topo_view.shape[axes.index(1)],
topo_view.shape[axes.index('c')])
self.pixels_per_channel = (self.topo_view_shape[1] *
self.topo_view_shape[2])
self.n_channels = self.topo_view_shape[3]
self.shape = (self.topo_view_shape[0],
np.product(self.topo_view_shape[1:]))
self.ndim = len(self.shape)
def __getitem__(self, item):
"""
Indexes the design matrix and transforms the requested batch from
the topological view.
Parameters
----------
item : slice or ndarray
Batch selection. Either a slice or a boolean mask.
"""
sel = [slice(None)] * len(self.topo_view_shape)
sel[self.axes.index('b')] = item
sel = tuple(sel)
V = self.topo_view[sel]
batch_size = V.shape[self.axes.index('b')]
rval = np.zeros((batch_size,
self.pixels_per_channel * self.n_channels),
dtype=V.dtype)
for i in xrange(self.n_channels):
ppc = self.pixels_per_channel
sel = [slice(None)] * len(V.shape)
sel[self.axes.index('c')] = i
sel = tuple(sel)
rval[:, i * ppc:(i + 1) * ppc] = V[sel].reshape(batch_size, ppc)
return rval
|
nmabhi/Webface
|
refs/heads/master
|
tests/openface_demo_tests.py
|
5
|
# OpenFace demo tests.
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
import tempfile
import sys
from subprocess import Popen, PIPE
openfaceDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
exampleImages = os.path.join(openfaceDir, 'images', 'examples')
lfwSubset = os.path.join(openfaceDir, 'data', 'lfw-subset')
def test_compare_demo():
cmd = [sys.executable, os.path.join(openfaceDir, 'demos', 'compare.py'),
os.path.join(exampleImages, 'lennon-1.jpg'),
os.path.join(exampleImages, 'lennon-2.jpg')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert '0.763' in out
def test_classification_demo_pretrained():
cmd = [sys.executable, os.path.join(openfaceDir, 'demos', 'classifier.py'),
'infer',
os.path.join(openfaceDir, 'models', 'openface',
'celeb-classifier.nn4.small2.v1.pkl'),
os.path.join(exampleImages, 'carell.jpg')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert 'Predict SteveCarell with 0.97 confidence.' in out
def test_classification_demo_pretrained_multi():
cmd = [sys.executable, os.path.join(openfaceDir, 'demos', 'classifier.py'),
'infer', '--multi',
os.path.join(openfaceDir, 'models', 'openface',
'celeb-classifier.nn4.small2.v1.pkl'),
os.path.join(exampleImages, 'longoria-cooper.jpg')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert 'Predict EvaLongoria @ x=91 with 0.99 confidence.' in out
assert 'Predict BradleyCooper @ x=191 with 0.99 confidence.' in out
def test_classification_demo_training():
assert os.path.isdir(lfwSubset), 'Get lfw-subset by running ./data/download-lfw-subset.sh'
workDir = tempfile.mkdtemp(prefix='OpenFaceCls-')
cmd = [sys.executable, os.path.join(openfaceDir, 'util', 'align-dlib.py'),
os.path.join(lfwSubset, 'raw'), 'align', 'outerEyesAndNose',
os.path.join(workDir, 'aligned')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert p.returncode == 0
cmd = [sys.executable, os.path.join(openfaceDir, 'util', 'align-dlib.py'),
os.path.join(lfwSubset, 'raw'), 'align', 'outerEyesAndNose',
os.path.join(workDir, 'aligned')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert p.returncode == 0
cmd = ['th', './batch-represent/main.lua',
'-data', os.path.join(workDir, 'aligned'),
'-outDir', os.path.join(workDir, 'reps')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert p.returncode == 0
cmd = [sys.executable, os.path.join(openfaceDir, 'demos', 'classifier.py'),
'train',
os.path.join(workDir, 'reps')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
assert p.returncode == 0
cmd = [sys.executable, os.path.join(openfaceDir, 'demos', 'classifier.py'),
'infer',
os.path.join(workDir, 'reps', 'classifier.pkl'),
os.path.join(lfwSubset, 'raw', 'Adrien_Brody', 'Adrien_Brody_0001.jpg')]
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
(out, err) = p.communicate()
print(out)
print(err)
m = re.search('Predict (.*) with (.*) confidence', out)
assert m is not None
assert m.group(1) == 'Adrien_Brody'
assert float(m.group(2)) >= 0.80
shutil.rmtree(workDir)
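def _run(cmd):
    # Hedged sketch, not part of the original tests: the Popen pattern
    # repeated in the tests above, factored out here for reference.
    p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
    (out, err) = p.communicate()
    print(out)
    print(err)
    return p.returncode, out, err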
|
eRestin/Mezz
|
refs/heads/master
|
mezzanine/utils/sites.py
|
7
|
import os
import sys
from django.contrib.sites.models import Site
from mezzanine.conf import settings
from mezzanine.core.request import current_request
def current_site_id():
"""
Responsible for determining the current ``Site`` instance to use
when retrieving data for any ``SiteRelated`` models. If a request
is available, and the site can be determined from it, we store the
site against the request for subsequent retrievals. Otherwise the
order of checks is as follows:
- ``site_id`` in session. Used in the admin so that admin users
can switch sites and stay on the same domain for the admin.
- host for the current request matched to the domain of the site
instance.
- ``MEZZANINE_SITE_ID`` environment variable, so management
commands or anything else outside of a request can specify a
site.
- ``SITE_ID`` setting.
"""
from mezzanine.utils.cache import cache_installed, cache_get, cache_set
request = current_request()
site_id = getattr(request, "site_id", None)
if request and not site_id:
site_id = request.session.get("site_id", None)
if not site_id:
domain = request.get_host().lower()
if cache_installed():
# Don't use Mezzanine's cache_key_prefix here, since it
# uses this very function we're in right now to create a
# per-site cache key.
bits = (settings.CACHE_MIDDLEWARE_KEY_PREFIX, domain)
cache_key = "%s.site_id.%s" % bits
site_id = cache_get(cache_key)
if not site_id:
try:
site = Site.objects.get(domain__iexact=domain)
except Site.DoesNotExist:
pass
else:
site_id = site.id
if cache_installed():
cache_set(cache_key, site_id)
if request and site_id:
request.site_id = site_id
if not site_id:
site_id = os.environ.get("MEZZANINE_SITE_ID", settings.SITE_ID)
return site_id
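def _demo_site_id_override():
    # Hedged sketch, not part of the original module; the id value is
    # hypothetical. Outside a request/session, the environment variable
    # takes precedence over settings.SITE_ID (note the value comes back
    # as the raw environment string).
    os.environ["MEZZANINE_SITE_ID"] = "2"
    return current_site_id()  # -> "2"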
def has_site_permission(user):
"""
Checks if a staff user has staff-level access for the current site.
The actual permission lookup occurs in ``SitePermissionMiddleware``,
which marks the request with the ``has_site_permission`` flag so
that we only query the db once per request; this function then
serves as the entry point for everything else to check access. We
also fall back to an ``is_staff`` check if the middleware is not
installed, to ease migration.
"""
mw = "mezzanine.core.middleware.SitePermissionMiddleware"
if mw not in settings.MIDDLEWARE_CLASSES:
from warnings import warn
warn(mw + " missing from settings.MIDDLEWARE_CLASSES - per-site "
"permissions not applied")
return user.is_staff and user.is_active
return getattr(user, "has_site_permission", False)
def host_theme_path(request):
"""
Returns the directory of the theme associated with the given host.
"""
for (host, theme) in settings.HOST_THEMES:
if host.lower() == request.get_host().split(":")[0].lower():
try:
__import__(theme)
module = sys.modules[theme]
except ImportError:
pass
else:
return os.path.dirname(os.path.abspath(module.__file__))
return ""
def templates_for_host(request, templates):
"""
Given a template name (or a list of them), returns the template names
as a list, with a copy of each name prefixed with the host's theme
directory inserted at the front of the list.
"""
if not isinstance(templates, (list, tuple)):
templates = [templates]
theme_dir = host_theme_path(request)
host_templates = []
if theme_dir:
for template in templates:
host_templates.append("%s/templates/%s" % (theme_dir, template))
host_templates.append(template)
return host_templates
return templates
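def _demo_templates_for_host(request):
    # Hedged sketch, not part of the original module; assumes a host
    # matched to a hypothetical theme package with templates on disk.
    # The theme's copy of each template is searched first:
    return templates_for_host(request, "pages/page.html")
    # -> ["<theme_dir>/templates/pages/page.html", "pages/page.html"]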
|
sleepers-anonymous/zscore
|
refs/heads/master
|
sleep/migrations/0007_remove_sleepergroup_defunctmembers.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-08-24 06:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sleep', '0006_remove_sleep_sleepcycles'),
]
operations = [
migrations.RemoveField(
model_name='sleepergroup',
name='defunctMembers',
),
]
|
rruebner/odoo
|
refs/heads/master
|
addons/hr_timesheet/report/__init__.py
|
395
|
import hr_timesheet_report
|
youdonghai/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertToFStringIntentionTest/formatMethodNestedFields3.py
|
31
|
'{:.{}}'.format(3.1415926, 3)
|
salamer/django
|
refs/heads/master
|
django/conf/locale/id/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i"
TIME_FORMAT = 'G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d-%m-%y', '%d/%m/%y', # '25-10-09', '25/10/09'
'%d-%m-%Y', '%d/%m/%Y', # '25-10-2009', '25/10/2009'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006'
]
TIME_INPUT_FORMATS = [
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%d-%m-%Y %H.%M.%S', # '25-10-2009 14.30.59'
'%d-%m-%Y %H.%M.%S.%f', # '25-10-2009 14.30.59.000200'
'%d-%m-%Y %H.%M', # '25-10-2009 14.30'
'%d-%m-%Y', # '25-10-2009'
'%d-%m-%y %H.%M.%S', # '25-10-09 14.30.59'
'%d-%m-%y %H.%M.%S.%f', # '25-10-09 14.30.59.000200'
'%d-%m-%y %H.%M', # '25-10-09 14.30'
'%d-%m-%y', # '25-10-09'
'%m/%d/%y %H.%M.%S', # '10/25/06 14.30.59'
'%m/%d/%y %H.%M.%S.%f', # '10/25/06 14.30.59.000200'
'%m/%d/%y %H.%M', # '10/25/06 14.30'
'%m/%d/%y', # '10/25/06'
'%m/%d/%Y %H.%M.%S', # '10/25/2009 14.30.59'
'%m/%d/%Y %H.%M.%S.%f', # '10/25/2009 14.30.59.000200'
'%m/%d/%Y %H.%M', # '10/25/2009 14.30'
'%m/%d/%Y', # '10/25/2009'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
benbox69/pyload
|
refs/heads/stable
|
module/plugins/hoster/OronCom.py
|
15
|
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class OronCom(DeadHoster):
__name__ = "OronCom"
__type__ = "hoster"
__version__ = "0.15"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?oron\.com/\w{12}'
__config__ = [] #@TODO: Remove in 0.4.10
__description__ = """Oron.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("chrox", "chrox@pyload.org"),
("DHMH", "DHMH@pyload.org")]
getInfo = create_getInfo(OronCom)
|
mrogers950/origin
|
refs/heads/master
|
vendor/github.com/google/certificate-transparency/python/ct/crypto/verify_rsa.py
|
24
|
from ct.crypto import error
from ct.crypto import pem
from ct.proto import client_pb2
import Crypto.Hash.SHA256
import Crypto.PublicKey.RSA
import Crypto.Signature.PKCS1_v1_5
class RsaVerifier(object):
"""Verifies RSA signatures."""
# The signature algorithm used for this public key.
SIGNATURE_ALGORITHM = client_pb2.DigitallySigned.RSA
# The hash algorithm used for this public key.
HASH_ALGORITHM = client_pb2.DigitallySigned.SHA256
# Markers to look for when reading a PEM-encoded RSA public key.
__READ_MARKERS = ("PUBLIC KEY", "RSA PUBLIC KEY")
# A marker to write when writing a PEM-encoded RSA public key.
__WRITE_MARKER = "RSA PUBLIC KEY"
def __init__(self, key_info):
"""Creates a verifier that uses a PEM-encoded RSA public key.
Args:
- key_info: KeyInfo protobuf message
Raises:
- PemError: If the key has an invalid encoding
"""
if (key_info.type != client_pb2.KeyInfo.RSA):
raise error.UnsupportedAlgorithmError(
"Expected RSA key, but got key type %d" % key_info.type)
# Will raise a PemError on invalid encoding
self.__der, _ = pem.from_pem(key_info.pem_key, self.__READ_MARKERS)
try:
self.__key = Crypto.PublicKey.RSA.importKey(self.__der)
except (ValueError, IndexError, TypeError) as e:
raise error.EncodingError(e)
def __repr__(self):
return "%s(public key: %r)" % (self.__class__.__name__,
pem.to_pem(self.__der,
self.__WRITE_MARKER))
@error.returns_true_or_raises
def verify(self, signature_input, signature):
"""Verifies the signature was created by the owner of the public key.
Args:
- signature_input: The data that was originally signed.
- signature: An RSA SHA256 signature.
Returns:
- True if the signature verifies.
Raises:
- error.SignatureError: If the signature fails verification.
"""
verifier = Crypto.Signature.PKCS1_v1_5.new(self.__key)
sha256_hash = Crypto.Hash.SHA256.new(signature_input)
if verifier.verify(sha256_hash, signature):
return True
else:
raise error.SignatureError("Signature did not verify: %s" %
signature.encode("hex"))
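def _demo_verify(key_info, data, signature):
    """Hedged sketch, not part of the original module: typical use.
    Assumes key_info is a client_pb2.KeyInfo of type RSA carrying a
    PEM-encoded public key; verify() returns True on success and raises
    error.SignatureError otherwise."""
    verifier = RsaVerifier(key_info)
    return verifier.verify(data, signature)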
|
hedaoyuan/Paddle
|
refs/heads/master
|
python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py
|
13
|
from paddle.trainer_config_helpers import *
settings(batch_size=100, learning_rate=1e-5)
data = data_layer(name='data', size=3200, height=20, width=10)
spp = spp_layer(
input=data, pyramid_height=2, num_channels=16, pool_type=MaxPooling())
outputs(spp)
|
atul-bhouraskar/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/migrated_app/models.py
|
12133432
| |
sorenk/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/illumos/__init__.py
|
12133432
|